Compare commits


158 Commits

Author SHA1 Message Date
maskpp
88703e113e Add fast cache. 2023-01-31 14:10:01 +08:00
HAOYUatHZ
346c713d27 feat(redis): support ClusterClient (#268) 2023-01-30 10:48:54 +08:00
maskpp
c8c063b622 fix(docker redis): Fix redis docker randomly panic. (#267) 2023-01-29 20:46:40 +08:00
maskpp
b5549f6301 feat(cache and trace api): Add redis cache in database and add trace api in bridge.l2backend. (#228)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-29 09:56:25 +08:00
Péter Garamvölgyi
16576b6f53 fix(bridge): Handle duplicate messages (#261) 2023-01-20 12:09:47 +01:00
HAOYUatHZ
aa885f068f docs(db): fix sql comments (#260) 2023-01-20 09:44:19 +08:00
HAOYUatHZ
1f764a579d refactor(bridge): refactor layer2MessageOrm.GetL2Messages() (#243) 2023-01-20 09:29:34 +08:00
HAOYUatHZ
91ee767669 doc(db): update sql comment (#259) 2023-01-20 09:27:50 +08:00
Péter Garamvölgyi
7eac41691e feat(bridge): handle expired messages correctly (#257) 2023-01-19 23:53:34 +01:00
Péter Garamvölgyi
d9516890b0 feat(bridge): handle expired messages (#256) 2023-01-19 23:21:17 +01:00
Péter Garamvölgyi
ddb96bb732 feat(bridge): add more l1 relayer logs (#255) 2023-01-19 22:37:02 +01:00
Péter Garamvölgyi
e419dd8d5c fix(bridge): add limit to GetL1MessagesByStatus (#254) 2023-01-19 22:19:48 +01:00
Péter Garamvölgyi
c99c65bdfd fix(bridge): execute watcher loops independently (#253) 2023-01-19 21:14:45 +01:00
colin
18fd7f56a8 fix(coordinator): reset roller state when proof session timeout (#210)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-19 21:37:30 +08:00
Péter Garamvölgyi
a319dc1cff bugfix(bridge): only relay messages for finalized batches (#251)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-19 20:54:09 +08:00
colin
52bf3a55fc fix(coordinator): fix CollectProofs for multi-roller & add tests (#252)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-19 14:52:20 +08:00
Péter Garamvölgyi
598e10e4fc feat(bridge): update skipped batches in a single db operation (#242) 2023-01-18 15:21:41 +01:00
maskpp
eed3f42731 fix(coordinator): Fix bug in coordinator.GetNumberOfIdleRollers function. (#247)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-01-18 20:51:19 +08:00
colin
5a4bea8ccd fix(coordinator): set session failed when all rollers have submitted invalid proof (#241)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-18 20:01:43 +08:00
ChuhanJin
5b37b63d89 build(jenkinsfile): replace status_check with comment in coverage test (#249)
Co-authored-by: vincent <419436363@qq.com>
2023-01-18 19:44:47 +08:00
HAOYUatHZ
5e5c4f7701 build(roller&coordinator): fix Makefile (#245)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-18 16:51:55 +08:00
HAOYUatHZ
09dc638652 perf(db): add limit in block_batch queries (#240) 2023-01-17 20:46:51 +08:00
HAOYUatHZ
b598a01e7f build(jenkins): remove changeset condition (#239) 2023-01-17 07:07:58 +08:00
Scroll Dev
0fcdb6f824 doc: bump version number (#238) 2023-01-17 06:57:43 +08:00
Xi Lin
5a95dcf5ba bugfix(bridge&database): fix compute message hash and add unit tests (#233)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-01-17 06:56:21 +08:00
Xi Lin
d0c63e75df bugfix(database): make sure return order of statuses in GetRollupStatusByIDList (#235)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-16 20:57:27 +08:00
Scroll Dev
676b8a2230 Update pull_request_template.md 2023-01-15 07:46:57 +08:00
Xi Lin
a1cb3d3b87 fix(contracts&libzkp): fix mpt circuits panic (#232)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-14 14:58:37 +08:00
colin
47b4c54e05 fix(relayer): use sync map for concurrent write (#231)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-01-14 12:07:59 +08:00
maskpp
fe822a65b9 refactor(bridge): refactor bridge parallelization (#213)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-14 09:40:07 +08:00
HAOYUatHZ
9bd4931f93 chore: fix typos in pull_request template (#230) 2023-01-13 11:50:39 +08:00
HAOYUatHZ
411cb19b62 chore: add pull_request template (#229) 2023-01-13 11:46:19 +08:00
HAOYUatHZ
8f55299941 Revert "refactor(coordinator): remove debug api namespace from manager" (#224)
Co-authored-by: Nazarii Denha <dengaaa2002@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-13 08:41:36 +08:00
HAOYUatHZ
0bdcce79ba chore: bump version number (#226) 2023-01-13 08:33:53 +08:00
Péter Garamvölgyi
fcd29c305d fix(coordinator): use uint32 for timestamp to enable RLP encoding (#225) 2023-01-12 18:24:59 +01:00
Lawliet-Chan
54a6ab472a feat(message): replace json.Marshal with rlp.Encode for Signing (#219) 2023-01-12 22:21:59 +08:00
HAOYUatHZ
b2a5baa2ad feat(bridge): upgrade TraceGasCost estimation (#222) 2023-01-12 16:45:11 +08:00
Lawliet-Chan
dc6b71ca23 chore: improve golang lint rules (#172)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-12 12:55:00 +08:00
Nazarii Denha
e1247a7eb2 refactor(coordinator): remove debug api namespace from manager (#221)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-01-12 10:16:48 +08:00
colin
65699b89bb feat(coordinator): support rollers-per-session (#215)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-11 21:49:37 +08:00
maskpp
a44956a05f fix(bug): fix data race in common/cmd module. (#220) 2023-01-11 21:05:46 +08:00
maskpp
b85b4bafc2 refactor(coordinator): refactor CollectProofs func (#211)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-11 13:31:42 +08:00
colin
2e3c80c580 feat(roller): add submit proof retry (#217)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-11 12:30:33 +08:00
Péter Garamvölgyi
d24392feac feat(bridge): Propose batches, fetch blocks and events in parallel (#216) 2023-01-10 23:13:06 +08:00
ChuhanJin
5c6e20a774 build(Jenkins): add docker push job for tag (#214)
Co-authored-by: vincent <419436363@qq.com>
2023-01-10 14:43:20 +08:00
HAOYUatHZ
9f9e23ff0e chore(coordinator): add more logs (#177)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-10 09:29:43 +08:00
Péter Garamvölgyi
fa93de97de feat(bridge): Relay messages and batches in parallel (#212)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-10 09:18:36 +08:00
HAOYUatHZ
deedf7a5d0 refactor(coordinator): refactor CollectProofs (#208) 2023-01-10 08:49:14 +08:00
Xi Lin
73432127cd feat(contract): only use blockHeight to verify relay message in layer1 (#209) 2023-01-09 15:03:30 +08:00
colin
a78160ddad fix(roller): avoid discarding task when connection failure (#203)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-09 10:52:50 +08:00
Lawliet-Chan
fff2517a76 build(libzkp): update dependency common-rs -> scroll-zkevm (#204)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-09 09:32:14 +08:00
Xi Lin
eba7647e21 bugfix(bridge): add suffix to id in processingFinalization and processingCommitment (#207)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-06 14:33:21 +01:00
Xi Lin
51076d21c3 bugfix(bridge): failed to fetch rollup events on restart (#201) 2023-01-06 20:39:09 +08:00
maskpp
077ed9839a feat(bug): fix bug in l2 watcher (#199) 2023-01-05 17:32:50 +08:00
colinlyguo
bdcca55bd5 fix: roller test path (#202)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-05 11:41:45 +08:00
Xi Lin
20b8e2bf6c feat(contract): add finalization status api in contract (#197)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-04 23:26:47 +08:00
ChuhanJin
cc596c42b3 build: fix rate limit failure by using solc static bin (#200)
Co-authored-by: vincent <419436363@qq.com>
2023-01-04 21:52:10 +08:00
HAOYUatHZ
7da717b251 chore(bridge): update l2 contractEventsBlocksFetchLimit (#198) 2023-01-04 15:47:35 +08:00
HAOYUatHZ
bbdbf3995f feat(bridge): fast catch up new blocks (#184)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-04 15:43:14 +08:00
HAOYUatHZ
7fb8bc6e29 fix(test): fix TestCmd (#194) 2023-01-02 19:38:37 +08:00
HAOYUatHZ
b8fae294e4 build(Jenkins): use parallel testing (#169)
Co-authored-by: Scroll Dev <dev@scroll.io>
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2022-12-29 16:59:06 +08:00
maskpp
23bc381f5c fix(integration-test): fix testStartProcess (#193) 2022-12-29 16:43:06 +08:00
maskpp
b4ade85a9c feat(roller): If roller is stopped, don't need to retry again. (#191)
Co-authored-by: vincent <419436363@qq.com>
2022-12-28 16:23:29 +08:00
HAOYUatHZ
d04522027c refactor: clean up codes (#187)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-12-28 10:55:56 +08:00
ChuhanJin
7422bea51f build(jenkinsfile): add coverage (#181)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-12-27 22:18:17 +08:00
Lawliet-Chan
abcc159390 chore(libzkp): short writings makefile in roller and coordinator (#189) 2022-12-27 18:58:09 +08:00
HAOYUatHZ
a545954dbc chore(bridge): increase blockTracesFetchLimit & `contractEventsBlock… (#185) 2022-12-26 17:55:03 +08:00
HAOYUatHZ
dc6ef83fbd chore: bump version number (#182) 2022-12-26 14:32:11 +08:00
HAOYUatHZ
e17647bc9f chore(batch_proposer): add more check for batchTxNumThreshold (#180) 2022-12-26 14:16:39 +08:00
HAOYUatHZ
feaa95aefe Revert "feat(libzkp): unwind panic (#163)" (#179) 2022-12-26 11:50:27 +08:00
HAOYUatHZ
8ad8a1b6f0 feat(batch_proposer): add batchTxNumThreshold (#178) 2022-12-26 10:17:16 +08:00
maskpp
22f6781c26 feat(Integration test): Enable run child process in integration-test test cases. (#102)
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-22 22:13:04 +08:00
HAOYUatHZ
b165402e81 refactor(bridge): refactor watcher confirmation check and add more logs (#173)
Co-authored-by: Scroll Dev <dev@scroll.io>
2022-12-22 21:26:52 +08:00
HAOYUatHZ
9ee8d977cb build: revert coverage test (#176) 2022-12-22 20:10:32 +08:00
HAOYUatHZ
e1a6eb65f6 build: decrease coverage threshold to 40% (#175) 2022-12-22 15:31:05 +08:00
maskpp
9096334eab chore(database): rename block_result to block_trace (#174) 2022-12-22 13:47:59 +08:00
HAOYUatHZ
c360cf52b1 build: improve dockerfiles (#171) 2022-12-20 11:04:18 +08:00
HAOYUatHZ
00dc075d9c refactor: clean up codes (#168) 2022-12-19 20:06:00 +08:00
ChuhanJin
af77c9fa83 build(jenkinsfile): add coverage (#139)
Co-authored-by: vincent <419436363@qq.com>
2022-12-19 17:07:10 +08:00
maskpp
1c4ed0487a fix(tests): fix docker workflow for tests (#152)
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-19 14:49:01 +08:00
HAOYUatHZ
e4761d9694 refactor(roller): reorg directory (#167) 2022-12-19 11:16:40 +08:00
maskpp
fbc7e03c67 fix(test): Fix weak test code in coordinator module (#165)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-16 15:06:46 +08:00
HAOYUatHZ
927011641d refactor(bridge): remove L2Watcher ReplayBlockResultByHash API (#144)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-12-16 10:45:46 +08:00
Lawliet-Chan
6eb71869e8 feat(libzkp): unwind panic (#163)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-15 20:26:38 +08:00
Xi Lin
91dcd51d55 fix(database): handle empty message array (#166)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-15 17:09:37 +08:00
Nazarii Denha
479a061ee6 perf: optimize block trace db usage (#162)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-12-14 21:43:33 +08:00
ChuhanJin
f9c6fcf801 build(Jenkinsfile): move "docker cleanup" to an individual following task (#141)
Co-authored-by: vincent <419436363@qq.com>
2022-12-14 15:26:57 +08:00
HAOYUatHZ
d463c6fb72 feat(batch_proposer): add batch_proposer configuration (#129)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-12-14 10:45:41 +08:00
maskpp
5a51d7d3f0 refactor: fix lint error (#161) 2022-12-13 21:23:24 +08:00
HAOYUatHZ
450ff4b51f build: bump version to prealpha-v7.3 (#160) 2022-12-13 13:55:51 +08:00
colinlyguo
807845a633 build(jenkins): parallel build binaries & docker images (#156)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-13 13:32:48 +08:00
maskpp
87fc9a8cc9 feat(l2geth version): bump l2geth version (#159) 2022-12-13 13:15:20 +08:00
Lawliet-Chan
a63fe6924a feat: Add version into coordinator (#157) 2022-12-13 11:58:13 +08:00
maskpp
41414e36bc feat(ws log): Add err log when client stop the ws connection. (#158) 2022-12-13 11:47:31 +08:00
HAOYUatHZ
d2a30504e9 build(github CI): replace actions/cache@v2 with `Swatinem/rust-cach… (#155) 2022-12-13 11:00:01 +08:00
Nazarii Denha
c05a31524f feat(coordinator&roller): secure handshaking (#106)
Co-authored-by: chuhanjin <419436363@qq.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: ChuhanJin <60994121+ChuhanJin@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Thegaram <th307q@gmail.com>
2022-12-12 13:24:42 +01:00
HAOYUatHZ
ac342faaf0 chore: add more logs for libzkp (#151)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2022-12-12 17:59:28 +08:00
HAOYUatHZ
f54575c410 build: rename intermediate docker images (#149) 2022-12-11 16:30:34 +08:00
HAOYUatHZ
b549816815 build(docker): add cargo-chef for coordinator docker build (#146)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
2022-12-11 15:31:06 +08:00
Lawliet-Chan
2bc951f622 feat: use common lib for verifier & roller (#74)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-12-10 18:46:18 +08:00
Xi Lin
1aea4a2584 fix(bridge): handle l2geth reorg while fetch missing block (#136) 2022-12-09 19:22:21 +08:00
ChuhanJin
11fa61c45c refact(orm): parallel insert and change gas_cost compute logic (#133)
Co-authored-by: vincent <419436363@qq.com>
2022-12-09 17:37:22 +08:00
maskpp
9cf8613bb1 fix: fix nil interface check (#143) 2022-12-09 16:02:04 +08:00
maskpp
55de584f80 build: Update make mock_abi cmd (#135) 2022-12-09 08:51:45 +08:00
HAOYUatHZ
98cd149627 refactor: merge common config (#128) 2022-12-08 20:52:25 +08:00
colinlyguo
303a7d02c8 feat: coordinator graceful restart (#110)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-12-08 19:44:16 +08:00
maskpp
e2f4477852 refactor(bridge config): delete chainID field (#134) 2022-12-06 14:21:21 +08:00
Xi Lin
cc2f5b5b0a feat(bridge): fix relay tx status based on fetched events (#56) 2022-12-06 13:32:57 +08:00
maskpp
8fedeec337 refactor(config): Delete endpoint field in coordinator config file. (#132) 2022-12-05 18:11:35 +08:00
ChuhanJin
4b32a90d52 refactor: correct coordinator log info (#130)
Co-authored-by: vincent <419436363@qq.com>
2022-12-05 15:20:52 +08:00
HAOYUatHZ
f2084fff6e chore(roller): add more logs (#127) 2022-12-02 10:47:13 +08:00
HAOYUatHZ
b71834c0e1 build: bump version number (#125) 2022-12-01 21:15:24 +08:00
ChuhanJin
b794f31d38 feat: update ws api service (#39)
Co-authored-by: chuhanjin <419436363@qq.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
2022-12-01 20:59:59 +08:00
Xi Lin
09dcaae497 fix: padding bytes in ComputeBatchID (#124) 2022-11-30 17:45:22 +08:00
HAOYUatHZ
02effbc5a1 fix(bridge): fix Layer2Relayer processSavedEvent (#123) 2022-11-30 16:19:46 +08:00
HAOYUatHZ
130f2cdf37 chore: add more logs (#121) 2022-11-30 15:42:09 +08:00
HAOYUatHZ
fed5cef7fc fix(bridge): fix watcher processedMsgHeight (#122) 2022-11-30 11:31:37 +08:00
HAOYUatHZ
c5b56f0068 fix: fix several typos (#117)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-11-29 21:27:24 +08:00
ChuhanJin
fd92584ab9 doc: update readme in bridge/coordinator/database (#118)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-29 21:15:41 +08:00
Xi Lin
76131d0de7 fix(bridge): correctly initialization and reuse nonce on resubmiting (#116) 2022-11-29 21:09:33 +08:00
Lawliet-Chan
921268a2b1 feat: bump prover lib version (#119) 2022-11-29 20:41:11 +08:00
HAOYUatHZ
95fd1c57f8 feat: include at least 1 block in a batch (#114)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-11-28 15:58:13 +08:00
Scroll Dev
93760d60b2 chore: bump version number 2022-11-25 17:09:03 +08:00
HAOYUatHZ
a701f14ad2 chore: bump l2geth version (#112) 2022-11-25 16:58:10 +08:00
ChuhanJin
f35fb5bcba test: optimize sleep and nonce in testcases (#87)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-24 07:55:20 +08:00
HAOYUatHZ
7258f6216b refactor: improve BlockTrace.GasUsed valuation (#108) 2022-11-22 16:56:37 +08:00
HAOYUatHZ
6bc5f754c9 chore: bump version number (#107) 2022-11-22 09:53:41 +08:00
HAOYUatHZ
6f453abbcc feat(bridge&coordinator&db&roller): commit batch of blocks (#57)
Co-authored-by: zimpha <zimpha@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-11-22 09:47:43 +08:00
Steven
971e935c48 build: update Dockerfile to use go compilation cache (#104) 2022-11-18 14:29:40 +08:00
HAOYUatHZ
399cea02e7 refactor: move common flags to utils package (#101)
Co-authored-by: chuhanjin <419436363@qq.com>
Co-authored-by: ChuhanJin <60994121+ChuhanJin@users.noreply.github.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: ChuhanJin <cj1436@nyu.edu>
2022-11-17 19:11:43 +08:00
colinlyguo
0b5f931212 fix: roller reward dispatch and proof resubmission DoS (#100)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-17 16:44:24 +08:00
Steven
6a2ffd8589 build: improve Dockerfile for go dependency cache (#99)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-11-17 16:11:34 +08:00
Steven
00f3a6fed6 fix: cargo-chef panic issue (#98)
Co-authored-by: Steven Gu <steven.gu@crypto.com>
2022-11-16 19:58:09 +08:00
ChuhanJin
e2b4efeaed build(database): add db_cli docker (#91)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-16 11:18:58 +08:00
HAOYUatHZ
c643df8036 build: remove docker push script for each commit (#85) 2022-11-14 21:52:49 +08:00
Lawliet-Chan
a87c56081f build(roller): fix Makefile (#78)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-11-10 15:46:43 +08:00
Péter Garamvölgyi
612e82ce09 fix: Fix import genesis script (#79) 2022-11-10 07:38:38 +01:00
Lawliet-Chan
80e9deb101 fix(roller): fix roller load keystore issue (#81) 2022-11-10 13:51:55 +08:00
HAOYUatHZ
2f7aa0c51e test: improve docker log (#82) 2022-11-10 13:37:19 +08:00
iczc
fcfde45d0a chore: add keydead pre-commit config (#64)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-10 12:18:29 +08:00
HAOYUatHZ
5272992189 build: opt jenkins logs (#72)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-11-10 10:50:22 +08:00
HAOYUatHZ
787f410bf1 chore: print failure details for SaveLayer1Messages & `SaveLayer2Mes… (#69) 2022-11-10 09:56:13 +08:00
maskpp
d549cba678 fix(coordinator): fix coordinator/manager_test.go sock cleanning issue (#77) 2022-11-09 21:19:11 +08:00
maskpp
2288cb9f71 simplify abi (#76) 2022-11-09 21:02:15 +08:00
maskpp
054b45c8f1 refactor(bridge): simplify L2Config Unmarshalling (#75) 2022-11-09 18:12:51 +08:00
HAOYUatHZ
0e312f88bb fix(bridge): fix fetching too many blocks (#63) 2022-11-07 17:51:37 +08:00
ChuhanJin
60f30e5b33 feat: add database bin (#62)
Co-authored-by: chuhanjin <419436363@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-11-05 06:16:24 +00:00
maskpp
3e76a1d2bc Fix bug in l2 watcher test case. (#68) 2022-11-04 15:36:18 +08:00
maskpp
8b1a3aa906 refactor: use account pool for sender (#53)
Co-authored-by: Péter Garamvölgyi <th307q@gmail.com>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-11-03 22:41:22 +08:00
Xi Lin
14e0231eb9 hotfix(contracts): fuzzing tests for testFinalizeBatchDepositERC721 (#65)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-03 17:20:11 +08:00
Péter Garamvölgyi
9fb40a8b14 build(contracts): use Foundry for deployment scripts (#52)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-03 15:52:49 +08:00
HAOYUatHZ
93efeb0b30 refactor(database): remove blockResultOrm.NumberOfBlocksInLastHour() (#49)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-11-03 15:23:31 +08:00
maskpp
defe1f42a9 random container port (#47)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-03 15:11:40 +08:00
Xi Lin
a648073962 feat(contracts): commit DA for multiple blocks (#40)
Co-authored-by: Scroll Dev <dev@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-11-02 18:46:11 +08:00
Lawliet-Chan
0892813867 feat: add version info in roller handshake (#60)
Co-authored-by: Steven Gu <steven.gu@crypto.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-11-01 20:51:20 +08:00
HAOYUatHZ
138c58025e docs: bump version number (#59) 2022-11-01 13:26:57 +08:00
Xi Lin
0571028228 feat(contracts): use 0920 verifier contracts (#58) 2022-11-01 13:23:39 +08:00
Lawliet-Chan
074a7a47df fix download_params.sh (#55) 2022-10-31 14:01:06 +08:00
233 changed files with 33724 additions and 117354 deletions

.github/pull_request_template.md (new file)

@@ -0,0 +1,7 @@
1. Purpose or design rationale of this PR
2. Does this PR involve a new deployment, and involve a new git tag & docker image tag? If so, has `tag` in `common/version.go` been updated?
3. Is this PR a breaking change? If so, has a `breaking-change` label been attached?


@@ -31,7 +31,11 @@ jobs:
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: pontem-network/get-solc@master
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint


@@ -24,12 +24,21 @@ jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-08-23
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint


@@ -24,6 +24,11 @@ jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-08-23
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
@@ -32,7 +37,7 @@ jobs:
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
runs-on: ubuntu-latest


@@ -35,21 +35,10 @@ jobs:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo registry
uses: actions/cache@v2
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-roller-cargo-registry-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v2
with:
path: ~/.cargo/git
key: ${{ runner.os }}-roller-cargo-index-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
- name: Cache cargo target
uses: actions/cache@v2
with:
path: /home/runner/work/scroll/scroll/roller/core/prover/rust/target
key: ${{ runner.os }}-roller-cargo-build-target-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
workspaces: "common/libzkp/impl -> target"
- name: Test
run: |
make roller
@@ -78,7 +67,7 @@ jobs:
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/go-roller/ -w .
- run: goimports -local scroll-tech/roller/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy

.gitignore

@@ -1,3 +1,8 @@
.idea
assets/params*
assets/seed
coverage.txt
build/bin
# misc
sftp-config.json

.pre-commit-config.yaml (new file)

@@ -0,0 +1,5 @@
repos:
- repo: git@github.com:scroll-tech/keydead.git
rev: main
hooks:
- id: keydead

Jenkinsfile

@@ -1,8 +1,6 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
def boolean test_result = false
pipeline {
agent any
options {
@@ -10,105 +8,112 @@ pipeline {
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
// LOG_DOCKER = 'true'
}
stages {
stage('Build') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
steps {
//start to build project
sh '''#!/bin/bash
export PATH=/home/ubuntu/go/bin:$PATH
make dev_docker
make -C bridge mock_abi
make -C bridge bridge
make -C bridge docker
make -C coordinator coordinator
make -C coordinator docker
'''
}
}
stage('Test') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
steps {
sh "docker ps -aq | xargs -r docker stop"
sh "docker container prune -f"
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
sh '''
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/migrate
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/docker
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/abi
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/l1
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/l2
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/sender
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/common/docker
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator/verifier
cd ..
'''
script {
for (i in ['bridge', 'coordinator', 'database']) {
sh "cd $i && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|l2\\|l1\\|common\\|coordinator')"
}
}
script { test_result = true }
}
}
}
stage('Docker') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
steps {
withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
script {
if (test_result == true) {
sh 'docker login --username=${dockerUser} --password=${dockerPassword}'
for (i in ['bridge', 'coordinator']) {
sh "docker build -t ${imagePrefix}/$i:${GIT_COMMIT} -f build/dockerfiles/${i}.Dockerfile ."
sh "docker push ${imagePrefix}/$i:${GIT_COMMIT}"
sh "docker rmi ${imagePrefix}/$i:${GIT_COMMIT}"
}
}
parallel {
stage('Build Prerequisite') {
steps {
sh 'make dev_docker'
sh 'make -C bridge mock_abi'
}
}
stage('Check Bridge Compilation') {
steps {
sh 'make -C bridge bridge'
}
}
stage('Check Coordinator Compilation') {
steps {
sh 'export PATH=/home/ubuntu/go/bin:$PATH'
sh 'make -C coordinator coordinator'
}
}
stage('Check Database Compilation') {
steps {
sh 'make -C database db_cli'
}
}
stage('Check Bridge Docker Build') {
steps {
sh 'make -C bridge docker'
}
}
stage('Check Coordinator Docker Build') {
steps {
sh 'make -C coordinator docker'
}
}
stage('Check Database Docker Build') {
steps {
sh 'make -C database docker'
}
}
}
}
stage('Parallel Test') {
parallel{
stage('Test bridge package') {
steps {
sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic -p 1 scroll-tech/bridge/...'
}
}
stage('Test common package') {
steps {
sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic -p 1 scroll-tech/common/...'
}
}
stage('Test coordinator package') {
steps {
sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic -p 1 scroll-tech/coordinator/...'
}
}
stage('Test database package') {
steps {
sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic -p 1 scroll-tech/database/...'
}
}
stage('Integration test') {
steps {
sh 'go test -v -race -tags="mock_prover mock_verifier" -coverprofile=coverage.integration.txt -covermode=atomic -p 1 scroll-tech/integration-test/...'
}
}
stage('Race test bridge package') {
steps {
sh "cd bridge && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
stage('Race test coordinator package') {
steps {
sh "cd coordinator && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
stage('Race test database package') {
steps {
sh "cd database && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
}
}
stage('Compare Coverage') {
steps {
sh "./build/post-test-report-coverage.sh"
script {
currentBuild.result = 'SUCCESS'
}
step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
}
}
}
post {
always {
post {
always {
publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
}


@@ -14,11 +14,11 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go mod tidy
cd $(PWD)/common/ && go mod tidy
cd $(PWD)/coordinator/ && go mod tidy
cd $(PWD)/database/ && go mod tidy
cd $(PWD)/roller/ && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
@@ -27,7 +27,7 @@ update: ## update dependencies
dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/.
docker build -t scroll_l2geth ./common/docker/l2geth/
clean: ## Empty out the bin folder
@rm -rf build/bin


@@ -5,12 +5,8 @@ IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
mock_abi:
solc --bin mock_bridge/Mock_Bridge.sol -o mock_bridge/ --overwrite
solc --abi mock_bridge/Mock_Bridge.sol -o mock_bridge/ --overwrite
abigen --bin=mock_bridge/Mock_Bridge.bin \
--abi=mock_bridge/Mock_Bridge.abi \
--pkg=mock_bridge --out=mock_bridge/Mock_Bridge.go
awk '{sub("github.com/ethereum","github.com/scroll-tech")}1' mock_bridge/Mock_Bridge.go > temp && mv temp mock_bridge/Mock_Bridge.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
bridge: ## Builds the Bridge instance.
go build -o $(PWD)/build/bin/bridge ./cmd
@@ -25,7 +21,7 @@ clean: ## Empty out the bin folder
@rm -rf build/bin
docker:
docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}


@@ -1,13 +1,12 @@
# Bridge
[![Actions Status](https://scroll-tech/bridge/workflows/Continuous%20Integration/badge.svg)](https://scroll-tech/bridge/actions)
[![codecov](https://codecov.io/gh/scroll-tech/bridge/branch/master/graph/badge.svg)](https://codecov.io/gh/scroll-tech/bridge)
This repo contains the Scroll bridge.
In addition, launching the bridge launches a separate instance of l2geth and sets up a communication channel
between the two over JSON-RPC sockets.
Note that the private keys used by the sender instances must not be duplicated.
## Dependency
+ install `abigen`
@@ -23,38 +22,7 @@ make clean
make bridge
```
## db operation
* init, show version, rollback, check status db
```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
# TEST_DB_DRIVER, TEST_DB_DSN: It is required when executing db test cases
export TEST_DB_DRIVER="postgres"
export TEST_DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# init db
./build/bin/bridge reset [--config ./config.json]
# show db version
./build/bin/bridge version [--config ./config.json]
# rollback db
/build/bin/bridge rollback [--version version] [--config ./config.json]
# show db status
./build/bin/bridge status [--config ./config.json]
# migrate db
./build/bin/bridge migrate [--config ./config.json]
```
## Start
* use default ports and config.json
```bash

File diff suppressed because one or more lines are too long


@@ -17,29 +17,20 @@ func TestPackRelayMessageWithProof(t *testing.T) {
assert.NoError(err)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockNumber: big.NewInt(0),
BlockHeight: big.NewInt(0),
BatchIndex: big.NewInt(0),
MerkleProof: make([]byte, 0),
}
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
assert.NoError(err)
}
func TestPackCommitBlock(t *testing.T) {
func TestPackCommitBatch(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
assert.NoError(err)
header := bridge_abi.IZKRollupBlockHeader{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BaseFee: big.NewInt(0),
StateRoot: common.Hash{},
BlockHeight: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: make([]byte, 0),
}
txns := make([]bridge_abi.IZKRollupLayer2Transaction, 5)
for i := 0; i < 5; i++ {
txns[i] = bridge_abi.IZKRollupLayer2Transaction{
@@ -50,13 +41,35 @@ func TestPackCommitBlock(t *testing.T) {
GasPrice: big.NewInt(0),
Value: big.NewInt(0),
Data: make([]byte, 0),
R: big.NewInt(0),
S: big.NewInt(0),
V: 0,
}
}
_, err = l1RollupABI.Pack("commitBlock", header, txns)
header := bridge_abi.IZKRollupLayer2BlockHeader{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BaseFee: big.NewInt(0),
StateRoot: common.Hash{},
BlockHeight: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: make([]byte, 0),
Txs: txns,
}
batch := bridge_abi.IZKRollupLayer2Batch{
BatchIndex: 0,
ParentHash: common.Hash{},
Blocks: []bridge_abi.IZKRollupLayer2BlockHeader{header},
}
_, err = l1RollupABI.Pack("commitBatch", batch)
assert.NoError(err)
}
func TestPackFinalizeBlockWithProof(t *testing.T) {
func TestPackFinalizeBatchWithProof(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
@@ -69,7 +82,7 @@ func TestPackFinalizeBlockWithProof(t *testing.T) {
instance[i] = big.NewInt(0)
}
_, err = l1RollupABI.Pack("finalizeBlockWithProof", common.Hash{}, proof, instance)
_, err = l1RollupABI.Pack("finalizeBatchWithProof", common.Hash{}, proof, instance)
assert.NoError(err)
}

bridge/cmd/app/app.go (new file)

@@ -0,0 +1,125 @@
package app
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
var (
app *cli.App
)
func init() {
// Set up Bridge app info.
app = cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `bridge-test` app for integration-test.
utils.RegisterSimulation(app, "bridge-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
l2Backend.APIs())
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
log.Info("Start bridge successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run runs the bridge cmd instance.
func Run() {
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,19 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunBridge(t *testing.T) {
bridge := cmd.NewCmd(t, "bridge-test", "--version")
defer bridge.WaitExit()
// wait result
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(nil)
}

bridge/cmd/app/flags.go (new file)

@@ -0,0 +1,31 @@
package app
import (
"github.com/urfave/cli/v2"
)
var (
apiFlags = []cli.Flag{
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
)
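
For orientation, here is a minimal invocation sketch that ties the HTTP-RPC flags defined above to the `bridge` build target shown in the bridge Makefile earlier in this diff. It assumes the commands run from the `bridge/` module directory with a `config.json` present there; it is an illustration, not a command taken from the repository's docs.

```bash
# Build the bridge binary (output path comes from the bridge Makefile target above).
make bridge

# Run it with the HTTP-RPC server enabled, using the flag names and defaults
# defined in bridge/cmd/app/flags.go; the config path is an assumption.
./build/bin/bridge --config ./config.json --http --http.addr localhost --http.port 8290
```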


@@ -1,88 +0,0 @@
package main
import (
"scroll-tech/bridge/config"
"github.com/jmoiron/sqlx"
"github.com/urfave/cli/v2"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func initDB(file string) (*sqlx.DB, error) {
cfg, err := config.NewConfig(file)
if err != nil {
return nil, err
}
dbCfg := cfg.DBConfig
factory, err := database.NewOrmFactory(dbCfg)
if err != nil {
return nil, err
}
log.Debug("Got db config from env", "driver name", dbCfg.DriverName, "dsn", dbCfg.DSN)
return factory.GetDB(), nil
}
// ResetDB clean or reset database.
func ResetDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
var version int64
err = migrate.Rollback(db.DB, &version)
if err != nil {
return err
}
log.Info("successful to reset", "init version", version)
return nil
}
// CheckDBStatus check db status
func CheckDBStatus(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
return migrate.Status(db.DB)
}
// DBVersion return the latest version
func DBVersion(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
version, err := migrate.Current(db.DB)
log.Info("show database version", "db version", version)
return err
}
// MigrateDB migrate db
func MigrateDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
return migrate.Migrate(db.DB)
}
// RollbackDB rollback db by version
func RollbackDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
version := ctx.Int64("version")
return migrate.Rollback(db.DB, &version)
}


@@ -1,93 +0,0 @@
package main
import "github.com/urfave/cli/v2"
var (
commonFlags = []cli.Flag{
&configFileFlag,
&verbosityFlag,
&logFileFlag,
&logJSONFormat,
&logDebugFlag,
}
// configFileFlag load json type config file.
configFileFlag = cli.StringFlag{
Name: "config",
Usage: "JSON configuration file",
Value: "./config.json",
}
// verbosityFlag log level.
verbosityFlag = cli.IntFlag{
Name: "verbosity",
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
Value: 3,
}
// logFileFlag decides where the logger output is sent. If this flag is left
// empty, it will log to stdout.
logFileFlag = cli.StringFlag{
Name: "log.file",
Usage: "Tells the sequencer where to write log entries",
}
logJSONFormat = cli.BoolFlag{
Name: "log.json",
Usage: "Tells the sequencer whether log format is json or not",
Value: true,
}
logDebugFlag = cli.BoolFlag{
Name: "log.debug",
Usage: "Prepends log messages with call-site location (file and line number)",
}
apiFlags = []cli.Flag{
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
l1Flags = []cli.Flag{
&l1ChainIDFlag,
&l1UrlFlag,
}
l1ChainIDFlag = cli.IntFlag{
Name: "l1.chainID",
Usage: "l1 chain id",
Value: 4,
}
l1UrlFlag = cli.StringFlag{
Name: "l1.endpoint",
Usage: "The endpoint connect to l1chain node",
Value: "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
}
l2Flags = []cli.Flag{
&l2ChainIDFlag,
&l2UrlFlag,
}
l2ChainIDFlag = cli.IntFlag{
Name: "l2.chainID",
Usage: "l2 chain id",
Value: 53077,
}
l2UrlFlag = cli.StringFlag{
Name: "l2.endpoint",
Usage: "The endpoint connect to l2chain node",
Value: "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
}
)


@@ -1,191 +1,7 @@
package main
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/database"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
import "scroll-tech/bridge/cmd/app"
func main() {
// Set up Bridge app info.
app := cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, commonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Flags = append(app.Flags, l1Flags...)
app.Flags = append(app.Flags, l2Flags...)
app.Before = func(ctx *cli.Context) error {
return utils.Setup(&utils.LogConfig{
LogFile: ctx.String(logFileFlag.Name),
LogJSONFormat: ctx.Bool(logJSONFormat.Name),
LogDebug: ctx.Bool(logDebugFlag.Name),
Verbosity: ctx.Int(verbosityFlag.Name),
})
}
app.Commands = []*cli.Command{
{
Name: "reset",
Usage: "Clean and reset database.",
Action: ResetDB,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "status",
Usage: "Check migration status.",
Action: CheckDBStatus,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "version",
Usage: "Display the current database version.",
Action: DBVersion,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: MigrateDB,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: RollbackDB,
Flags: []cli.Flag{
&configFileFlag,
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",
Value: 0,
},
},
},
}
// Run the sequencer.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func applyConfig(ctx *cli.Context, cfg *config.Config) {
if ctx.IsSet(l1ChainIDFlag.Name) {
cfg.L1Config.ChainID = ctx.Int64(l1ChainIDFlag.Name)
}
if ctx.IsSet(l1UrlFlag.Name) {
cfg.L1Config.Endpoint = ctx.String(l1UrlFlag.Name)
}
if ctx.IsSet(l2ChainIDFlag.Name) {
cfg.L2Config.ChainID = ctx.Int64(l2ChainIDFlag.Name)
}
if ctx.IsSet(l2UrlFlag.Name) {
cfg.L2Config.Endpoint = ctx.String(l2UrlFlag.Name)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(configFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
applyConfig(ctx, cfg)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
srv := rpc.NewServer()
apis := l2Backend.APIs()
for _, api := range apis {
if err = srv.RegisterName(api.Namespace, api.Service); err != nil {
log.Crit("register namespace failed", "namespace", api.Namespace, "error", err)
}
}
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
rpc.DefaultHTTPTimeouts,
srv)
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
app.Run()
}


@@ -1,9 +1,9 @@
{
"l1_config": {
"confirmations": 6,
"chain_id": 4,
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"start_height": 0,
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
@@ -15,24 +15,21 @@
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "AccessListTx"
"tx_type": "AccessListTx",
"min_balance": 100000000000000000000
},
"private_key": "abc123abc123abc123abc123abc123abc123abc123abc123abc123abc123abc1"
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
]
}
},
"l2_config": {
"confirmations": 1,
"chain_id": 53077,
"proof_generation_freq": 1,
"skipped_opcodes": [
"CREATE2",
"DELEGATECALL"
],
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"l2_messenger_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"check_pending_time": 10,
@@ -41,13 +38,38 @@
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "DynamicFeeTx"
"tx_type": "DynamicFeeTx",
"min_balance": 100000000000000000000
},
"private_key": "1212121212121212121212121212121212121212121212121212121212121212"
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"rollup_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
]
},
"batch_proposer_config": {
"proof_generation_freq": 1,
"batch_gas_threshold": 3000000,
"batch_tx_num_threshold": 135,
"batch_time_sec": 300,
"batch_blocks_limit": 100,
"skipped_opcodes": [
"CREATE2",
"DELEGATECALL"
]
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
"persistence": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
},
"redis": {
"url": "redis://default:@localhost:6379/0",
"expirations": {
"trace": 3600
}
}
}
}


@@ -5,84 +5,14 @@ import (
"os"
"path/filepath"
"github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/utils"
db_config "scroll-tech/database"
"scroll-tech/database"
)
// SenderConfig The config for transaction sender
type SenderConfig struct {
// The RPC endpoint of the ethereum or scroll public node.
Endpoint string `json:"endpoint"`
// The time to trigger check pending txs in sender.
CheckPendingTime uint64 `json:"check_pending_time"`
// The number of blocks to wait to escalate increase gas price of the transaction.
EscalateBlocks uint64 `json:"escalate_blocks"`
// The gap number between a block be confirmed and the latest block.
Confirmations uint64 `json:"confirmations"`
// The numerator of gas price escalate multiple.
EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
// The denominator of gas price escalate multiple.
EscalateMultipleDen uint64 `json:"escalate_multiple_den"`
// The maximum gas price can be used to send transaction.
MaxGasPrice uint64 `json:"max_gas_price"`
// the transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
TxType string `json:"tx_type"`
}
// L1Config loads l1eth configuration items.
type L1Config struct {
// Confirmations block height confirmations number.
Confirmations uint64 `json:"confirmations"`
// l1 chainID.
ChainID int64 `json:"chain_id"`
// l1 eth node url.
Endpoint string `json:"endpoint"`
// The start height to sync event from layer 1
StartHeight uint64 `json:"start_height"`
// The messenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}
// L2Config loads l2geth configuration items.
type L2Config struct {
// Confirmations block height confirmations number.
Confirmations uint64 `json:"confirmations"`
// l2geth chainId.
ChainID int64 `json:"chain_id"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
// Proof generation frequency, generating proof every k blocks
ProofGenerationFreq uint64 `json:"proof_generation_freq"`
// Skip generating proof when that opcodes appeared
SkippedOpcodes []string `json:"skipped_opcodes"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}
// RelayerConfig loads relayer configuration items.
type RelayerConfig struct {
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// MessengerContractAddress store the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// sender config
SenderConfig *SenderConfig `json:"sender_config"`
// The private key of the relayer
PrivateKey string `json:"private_key"`
}
// Config load configuration items.
type Config struct {
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *db_config.DBConfig `json:"db_config"`
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *database.DBConfig `json:"db_config"`
}
// NewConfig returns a new instance of Config.
@@ -98,9 +28,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// cover value by env fields
cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
return cfg, nil
}


@@ -0,0 +1,39 @@
package config_test
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
)
func TestConfig(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.True(t, assert.NoError(t, err), "failed to load config")
assert.True(t, len(cfg.L2Config.BatchProposerConfig.SkippedOpcodes) > 0)
assert.True(t, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
assert.True(t, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
assert.True(t, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys) > 0)
data, err := json.Marshal(cfg)
assert.NoError(t, err)
tmpJSON := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
defer func() { _ = os.Remove(tmpJSON) }()
assert.NoError(t, os.WriteFile(tmpJSON, data, 0644))
cfg2, err := config.NewConfig(tmpJSON)
assert.NoError(t, err)
assert.Equal(t, cfg.L1Config, cfg2.L1Config)
assert.Equal(t, cfg.L2Config, cfg2.L2Config)
assert.Equal(t, cfg.DBConfig, cfg2.DBConfig)
}

View File

@@ -0,0 +1,19 @@
package config
import "github.com/scroll-tech/go-ethereum/common"
// L1Config loads l1eth configuration items.
type L1Config struct {
// Confirmations is the number of block confirmations to wait for.
Confirmations uint64 `json:"confirmations"`
// l1 eth node url.
Endpoint string `json:"endpoint"`
// The start height from which to sync events from layer 1
StartHeight uint64 `json:"start_height"`
// The messenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address"`
// The rollup contract address deployed on layer 1 chain.
RollupContractAddress common.Address `json:"rollup_contract_address"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}
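For reference, a hedged sketch of an l1_config JSON block matching the tags above and how it decodes; the addresses and numbers are placeholders, and relayer_config is left out of the sketch even though the bridge normally requires it:

package main

import (
	"encoding/json"
	"fmt"

	"scroll-tech/bridge/config"
)

func main() {
	// Placeholder values only.
	raw := []byte(`{
		"confirmations": 6,
		"endpoint": "http://localhost:8545",
		"start_height": 0,
		"l1_messenger_address": "0x0000000000000000000000000000000000000001",
		"rollup_contract_address": "0x0000000000000000000000000000000000000002"
	}`)

	var cfg config.L1Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Endpoint, cfg.Confirmations, cfg.L1MessengerAddress.Hex())
}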

View File

@@ -0,0 +1,73 @@
package config
import (
"encoding/json"
"github.com/scroll-tech/go-ethereum/common"
)
// L2Config loads l2geth configuration items.
type L2Config struct {
// Confirmations is the number of block confirmations to wait for.
Confirmations uint64 `json:"confirmations"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
// The batch_proposer config
BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
}
// BatchProposerConfig loads l2watcher batch_proposer configuration items.
type BatchProposerConfig struct {
// Proof generation frequency: generate a proof every k blocks
ProofGenerationFreq uint64 `json:"proof_generation_freq"`
// Tx count threshold in a batch
BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"`
// Gas threshold in a batch
BatchGasThreshold uint64 `json:"batch_gas_threshold"`
// Time to wait before generating a batch even if the gas threshold is not met
BatchTimeSec uint64 `json:"batch_time_sec"`
// Max number of blocks in a batch
BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
// Skip proof generation when these opcodes appear
SkippedOpcodes map[string]struct{} `json:"-"`
}
// batchProposerConfigAlias is an alias of BatchProposerConfig, used to avoid infinite recursion in the custom (un)marshalers.
type batchProposerConfigAlias BatchProposerConfig
// UnmarshalJSON unmarshals the BatchProposerConfig struct.
func (b *BatchProposerConfig) UnmarshalJSON(input []byte) error {
var jsonConfig struct {
batchProposerConfigAlias
SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
}
if err := json.Unmarshal(input, &jsonConfig); err != nil {
return err
}
*b = BatchProposerConfig(jsonConfig.batchProposerConfigAlias)
b.SkippedOpcodes = make(map[string]struct{}, len(jsonConfig.SkippedOpcodes))
for _, opcode := range jsonConfig.SkippedOpcodes {
b.SkippedOpcodes[opcode] = struct{}{}
}
return nil
}
// MarshalJSON marshals BatchProposerConfig, converting SkippedOpcodes back into a JSON list.
func (b *BatchProposerConfig) MarshalJSON() ([]byte, error) {
jsonConfig := struct {
batchProposerConfigAlias
SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
}{batchProposerConfigAlias(*b), nil}
// Load skipOpcodes.
for op := range b.SkippedOpcodes {
jsonConfig.SkippedOpcodes = append(jsonConfig.SkippedOpcodes, op)
}
return json.Marshal(&jsonConfig)
}
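The custom (un)marshalers above convert the skipped_opcodes JSON list into a set and back. A minimal sketch of that round trip, using placeholder opcode names and threshold values:

package main

import (
	"encoding/json"
	"fmt"

	"scroll-tech/bridge/config"
)

func main() {
	// Placeholder opcodes and thresholds.
	raw := []byte(`{
		"proof_generation_freq": 1,
		"batch_gas_threshold": 3000000,
		"skipped_opcodes": ["CREATE2", "DELEGATECALL"]
	}`)

	var bp config.BatchProposerConfig
	if err := json.Unmarshal(raw, &bp); err != nil {
		panic(err)
	}
	// After unmarshaling, SkippedOpcodes is a set keyed by opcode name.
	_, skip := bp.SkippedOpcodes["CREATE2"]
	fmt.Println("skip CREATE2:", skip)

	// Marshaling converts the set back into a JSON list (order not guaranteed).
	out, _ := json.Marshal(&bp)
	fmt.Println(string(out))
}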

View File

@@ -0,0 +1,107 @@
package config
import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
)
// SenderConfig The config for transaction sender
type SenderConfig struct {
// The RPC endpoint of the ethereum or scroll public node.
Endpoint string `json:"endpoint"`
// The interval at which the sender checks its pending transactions.
CheckPendingTime uint64 `json:"check_pending_time"`
// The number of blocks to wait before escalating the gas price of a transaction.
EscalateBlocks uint64 `json:"escalate_blocks"`
// The number of blocks between a block being confirmed and the latest block.
Confirmations uint64 `json:"confirmations"`
// The numerator of the gas price escalation multiplier.
EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
// The denominator of the gas price escalation multiplier.
EscalateMultipleDen uint64 `json:"escalate_multiple_den"`
// The maximum gas price that can be used to send a transaction.
MaxGasPrice uint64 `json:"max_gas_price"`
// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
TxType string `json:"tx_type"`
// The minimum balance used when checking and topping up the sender's accounts.
MinBalance *big.Int `json:"min_balance,omitempty"`
}
// RelayerConfig loads relayer configuration items.
// Note that `MessageSenderPrivateKeys` and `RollupSenderPrivateKeys`
// must not share any private keys.
type RelayerConfig struct {
// RollupContractAddress stores the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// MessengerContractAddress stores the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// sender config
SenderConfig *SenderConfig `json:"sender_config"`
// The private keys of the relayer
MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
RollupSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
}
// relayerConfigAlias is an alias of RelayerConfig, used to avoid infinite recursion in the custom (un)marshalers.
type relayerConfigAlias RelayerConfig
// UnmarshalJSON unmarshals the relayer config struct.
func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
var jsonConfig struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}
if err := json.Unmarshal(input, &jsonConfig); err != nil {
return err
}
// Get messenger private key list.
*r = RelayerConfig(jsonConfig.relayerConfigAlias)
for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect private_key_list format, err: %v", err)
}
r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
}
// Get rollup private key
for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect roller_private_key format, err: %v", err)
}
r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
}
return nil
}
// MarshalJSON marshals RelayerConfig, converting private keys to hex strings.
func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
jsonConfig := struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}{relayerConfigAlias(*r), nil, nil}
// Convert message sender private keys to hex strings.
for _, priv := range r.MessageSenderPrivateKeys {
jsonConfig.MessageSenderPrivateKeys = append(jsonConfig.MessageSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
// Convert rollup sender private keys to hex strings.
for _, priv := range r.RollupSenderPrivateKeys {
jsonConfig.RollupSenderPrivateKeys = append(jsonConfig.RollupSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
return json.Marshal(&jsonConfig)
}
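Private keys are carried as hex strings in JSON and decoded into *ecdsa.PrivateKey values by the unmarshaler above. A hedged sketch of loading such a block (the key below is a throwaway test value, never a real secret):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"

	"scroll-tech/bridge/config"
)

func main() {
	// The private key below is 32 bytes of 0x01, for illustration only;
	// never embed a real key in code or committed JSON.
	raw := []byte(`{
		"messenger_contract_address": "0x0000000000000000000000000000000000000003",
		"sender_config": {"endpoint": "http://localhost:8545", "tx_type": "LegacyTx"},
		"message_sender_private_keys": ["0101010101010101010101010101010101010101010101010101010101010101"]
	}`)

	var rc config.RelayerConfig
	if err := json.Unmarshal(raw, &rc); err != nil {
		panic(err)
	}
	addr := crypto.PubkeyToAddress(rc.MessageSenderPrivateKeys[0].PublicKey)
	fmt.Println("message sender address:", addr.Hex())
}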

View File

@@ -3,40 +3,43 @@ module scroll-tech/bridge
go 1.18
require (
github.com/ethereum/go-ethereum v1.10.13
github.com/gorilla/websocket v1.4.2
github.com/jmoiron/sqlx v1.3.5
github.com/iden3/go-iden3-crypto v0.0.13
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.3.0
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e
github.com/stretchr/testify v1.8.1
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
modernc.org/mathutil v1.4.1
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.12 // indirect
github.com/lib/pq v1.10.6 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/russross/blackfriday/v2 v2.0.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.3.1 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -71,10 +71,10 @@ github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOC
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -82,8 +82,9 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -92,8 +93,9 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -108,8 +110,9 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13 h1:DEYFP9zk+Gruf3ae1JOJVhNmxK28ee+sMELPLgYTXpA=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@@ -136,8 +139,6 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -188,8 +189,9 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
@@ -202,12 +204,13 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/iden3/go-iden3-crypto v0.0.12 h1:dXZF+R9iI07DK49LHX/EKC3jTa0O2z+TUyvxjGK7V38=
github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/iden3/go-iden3-crypto v0.0.13 h1:ixWRiaqDULNyIDdOWz2QQJG5t4PpNHkQk2P6GV94cok=
github.com/iden3/go-iden3-crypto v0.0.13/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
@@ -220,14 +223,12 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -248,8 +249,9 @@ github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH6
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -260,9 +262,6 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -283,9 +282,6 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -320,6 +316,7 @@ github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHu
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -339,23 +336,31 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d h1:eh1i1M9BKPCQckNFQsV/yfazQ895hevkWr8GuqhKNrk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d/go.mod h1:SkQ1431r0LkrExTELsapw6JQHYpki8O1mSsObTSmBWU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e h1:TAqAeQiQI6b+TRyqyQ6qhizqY35LhqYe8lWhG0nNRGw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -369,6 +374,7 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -376,8 +382,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -389,13 +396,16 @@ github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
@@ -417,8 +427,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -486,8 +496,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -532,8 +543,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -544,8 +555,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -655,5 +666,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

View File

@@ -4,11 +4,9 @@ import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/orm"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
)
@@ -18,28 +16,22 @@ type Backend struct {
cfg *config.L1Config
watcher *Watcher
relayer *Layer1Relayer
orm orm.Layer1MessageOrm
orm database.OrmFactory
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L1Config, orm orm.Layer1MessageOrm) (*Backend, error) {
func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L1MessengerABI failed", "err", err)
return nil, err
}
relayer, err := NewLayer1Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, l1MessengerABI, orm)
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)
return &Backend{
cfg: cfg,

65
bridge/l1/l1_test.go Normal file
View File

@@ -0,0 +1,65 @@
package l1
import (
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
// docker image instances.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
)
func setupEnv(t *testing.T) {
// Load config.
var err error
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.Persistence.DriverName)
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
}
func TestL1(t *testing.T) {
setupEnv(t)
t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("testStartWatcher", testStartWatcher)
t.Cleanup(func() {
free(t)
})
}
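For orientation, a hedged sketch of wiring the refactored pieces together outside of tests: load the config, build one orm factory (mirroring the database.NewOrmFactory usage in the tests), and pass it to the new l1.New signature shown in the backend diff above. Error handling and lifecycle wiring are illustrative only:

package main

import (
	"context"

	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/bridge/config"
	"scroll-tech/bridge/l1"
	"scroll-tech/database"
)

func main() {
	cfg, err := config.NewConfig("./config.json")
	if err != nil {
		log.Crit("failed to load config", "err", err)
	}
	// One orm factory is shared with the l1 backend.
	db, err := database.NewOrmFactory(cfg.DBConfig)
	if err != nil {
		log.Crit("failed to init orm factory", "err", err)
	}
	backend, err := l1.New(context.Background(), cfg.L1Config, db)
	if err != nil {
		log.Crit("failed to create l1 backend", "err", err)
	}
	_ = backend // start/stop lifecycle omitted from this sketch
}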

View File

@@ -2,6 +2,7 @@ package l1
import (
"context"
"errors"
"math/big"
"time"
@@ -21,7 +22,7 @@ import (
)
// Layer1Relayer is responsible for
// 1. fetch pending Layer1Message from db
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
//
// Actions are triggered by new head from layer 1 geth node.
@@ -31,7 +32,7 @@ type Layer1Relayer struct {
client *ethclient.Client
sender *sender.Sender
db orm.Layer1MessageOrm
db orm.L1MessageOrm
cfg *config.RelayerConfig
// channel used to communicate with transaction sender
@@ -42,22 +43,17 @@ type Layer1Relayer struct {
}
// NewLayer1Relayer will return a new instance of Layer1Relayer
func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1ConfirmNum int64, db orm.Layer1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1ConfirmNum int64, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L2MessengerABI failed", "err", err)
return nil, err
}
prv, err := crypto.HexToECDSA(cfg.PrivateKey)
sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
log.Error("Failed to import private key from config file")
return nil, err
}
sender, err := sender.NewSender(ctx, cfg.SenderConfig, prv)
if err != nil {
log.Error("new sender failed", "err", err)
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
log.Error("new sender failed", "main address", addr.String(), "err", err)
return nil, err
}
@@ -76,17 +72,29 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
// ProcessSavedEvents relays saved unprocessed cross-domain messages to the target chain
func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL1UnprocessedMessages()
msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return
}
if len(msgs) == 0 {
return
if len(msgs) > 0 {
log.Info("Processing L1 messages", "count", len(msgs))
}
msg := msgs[0]
for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
return
}
}
}
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// @todo add support to relay multiple messages
sender := common.HexToAddress(msg.Sender)
from := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
@@ -98,24 +106,30 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l2MessengerABI.Pack("relayMessage", sender, target, value, fee, deadline, msgNonce, calldata)
data, err := r.l2MessengerABI.Pack("relayMessage", from, target, value, fee, deadline, msgNonce, calldata)
if err != nil {
log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return
return err
}
hash, err := r.sender.SendTransaction(msg.Layer1Hash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil {
log.Error("Failed to send relayMessage tx to L2", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
return
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
log.Info("relayMessage to layer2", "layer1 hash", msg.Layer1Hash, "tx hash", hash)
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
return err
}
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.Layer1Hash, hash.String(), orm.MsgSubmitted)
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.layer1hash", msg.Layer1Hash, "msg.height", msg.Height, "err", err)
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
}
return err
}
// Start the relayer process
@@ -131,12 +145,17 @@ func (r *Layer1Relayer) Start() {
// number, err := r.client.BlockNumber(r.ctx)
// log.Info("receive header", "height", number)
r.ProcessSavedEvents()
case cfm := <-r.confirmationCh: // @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, cfm.TxHash.String(), orm.MsgConfirmed)
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
case cfm := <-r.confirmationCh:
if !cfm.IsSuccessful {
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, orm.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
case <-r.stopCh:
return
}

View File

@@ -1,4 +1,4 @@
package l1_test
package l1
import (
"context"
@@ -7,52 +7,25 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database/migrate"
"scroll-tech/database"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/common/utils"
)
var TEST_CONFIG = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8570,
},
DbTestconfig: mock.DbTestconfig{
DbName: "testl1relayer_db",
DbPort: 5440,
DB_CONFIG: &database.DBConfig{
DriverName: utils.GetEnvWithDefault("TEST_DB_DRIVER", "postgres"),
DSN: utils.GetEnvWithDefault("TEST_DB_DSN", "postgres://postgres:123456@localhost:5440/testl1relayer_db?sslmode=disable"),
},
},
}
// TestCreateNewRelayer test create new relayer instance and stop
func TestCreateNewL1Relayer(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
// testCreateNewL1Relayer tests creating a new relayer instance and stopping it
func testCreateNewL1Relayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
l1docker := mock.NewTestL1Docker(t, TEST_CONFIG)
defer l1docker.Stop()
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(l1docker.Endpoint())
client, err := ethclient.Dial(l1gethImg.Endpoint())
assert.NoError(t, err)
dbImg := mock.GetDbDocker(t, TEST_CONFIG)
dbImg.Start()
defer dbImg.Stop()
db, err := database.NewOrmFactory(TEST_CONFIG.DB_CONFIG)
relayer, err := NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
relayer, err := l1.NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
relayer.Start()
defer relayer.Stop()
relayer.Start()
}

View File

@@ -5,33 +5,46 @@ import (
"math/big"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
)
const (
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
// keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
sentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
)
type rollupEvent struct {
batchID common.Hash
txHash common.Hash
status orm.RollupStatus
}
// Watcher will listen for smart contract events from Eth L1.
type Watcher struct {
ctx context.Context
client *ethclient.Client
db orm.Layer1MessageOrm
db database.OrmFactory
// The number of new blocks to wait before a block is considered confirmed
confirmations uint64
messengerAddress common.Address
messengerABI *abi.ABI
rollupAddress common.Address
rollupABI *abi.ABI
// The height up to which the watcher has retrieved event logs
processedMsgHeight uint64
@@ -40,7 +53,7 @@ type Watcher struct {
// NewWatcher returns a new instance of Watcher. The instance is not fully prepared yet,
// and still needs to be finalized and run by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, messengerABI *abi.ABI, db orm.Layer1MessageOrm) *Watcher {
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
savedHeight, err := db.GetLayer1LatestWatchedHeight()
if err != nil {
log.Warn("Failed to fetch height from db", "err", err)
@@ -58,123 +71,262 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
db: db,
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: messengerABI,
messengerABI: bridge_abi.L1MessengerMetaABI,
rollupAddress: rollupAddress,
rollupABI: bridge_abi.RollupMetaABI,
processedMsgHeight: uint64(savedHeight),
stop: stop,
}
}
// Start the Watcher module.
func (r *Watcher) Start() {
func (w *Watcher) Start() {
go func() {
// triggered by timer
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
for ; true; <-ticker.C {
select {
case <-ticker.C:
blockNumber, err := r.client.BlockNumber(r.ctx)
case <-w.stop:
return
default:
blockNumber, err := w.client.BlockNumber(w.ctx)
if err != nil {
log.Error("Failed to get block number", "err", err)
continue
}
if err := r.fetchContractEvent(blockNumber); err != nil {
if err := w.FetchContractEvent(blockNumber); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
case <-r.stop:
return
}
}
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (r *Watcher) Stop() {
r.stop <- true
func (w *Watcher) Stop() {
w.stop <- true
}
// FetchContractEvent pull latest event logs from given contract address and save in DB
func (r *Watcher) fetchContractEvent(blockHeight uint64) error {
fromBlock := int64(r.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(r.confirmations)
const contractEventsBlocksFetchLimit = int64(10)
if toBlock < fromBlock {
return nil
// FetchContractEvent pulls the latest event logs from the given contract addresses and saves them to storage
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(w.confirmations)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
to = toBlock
}
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.rollupAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))
// use rollup event to update rollup results db status
var batchIDs []string
for _, event := range rollupEvents {
batchIDs = append(batchIDs, event.batchID.String())
}
statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
if err != nil {
log.Error("Failed to GetRollupStatusByIDList", "err", err)
return err
}
if len(statuses) != len(batchIDs) {
log.Error("RollupStatus.Length mismatch with BatchIDs.Length", "RollupStatus.Length", len(statuses), "BatchIDs.Length", len(batchIDs))
return nil
}
for index, event := range rollupEvents {
batchID := event.batchID.String()
status := statuses[index]
// only update when db status is before event status
if event.status > status {
if event.status == orm.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
} else if event.status == orm.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
}
if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
return err
}
}
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
}
}
if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err
}
w.processedMsgHeight = uint64(to)
}
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
r.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = common.HexToHash(sentMessageEventSignature)
logs, err := r.client.FilterLogs(r.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
return nil
}
log.Info("Received new L1 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
eventLogs, err := parseBridgeEventLogs(logs, r.messengerABI)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
err = r.db.SaveLayer1Messages(r.ctx, eventLogs)
if err == nil {
r.processedMsgHeight = uint64(toBlock)
}
return err
return nil
}
func parseBridgeEventLogs(logs []types.Log, messengerABI *abi.ABI) ([]*orm.Layer1Message, error) {
func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
// Need to use the contract ABI to parse event logs.
// Can only be tested after our contracts are set up.
var parsedlogs []*orm.Layer1Message
var l1Messages []*orm.L1Message
var relayedMessages []relayedMessage
var rollupEvents []rollupEvent
for _, vLog := range logs {
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
err := messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 SentMessage event", "err", err)
return parsedlogs, err
err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 SentMessage event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
l1Messages = append(l1Messages, &orm.L1Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer1Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
case common.HexToHash(bridge_abi.CommitBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
BatchIndex *big.Int
ParentHash common.Hash
}{}
// BatchID is in topics[1]
event.BatchID = common.HexToHash(vLog.Topics[1].String())
err := w.rollupABI.UnpackIntoInterface(&event, "CommitBatch", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
batchID: event.BatchID,
txHash: vLog.TxHash,
status: orm.RollupCommitted,
})
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
BatchIndex *big.Int
ParentHash common.Hash
}{}
// BatchID is in topics[1]
event.BatchID = common.HexToHash(vLog.Topics[1].String())
err := w.rollupABI.UnpackIntoInterface(&event, "FinalizeBatch", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
return l1Messages, relayedMessages, rollupEvents, err
}
rollupEvents = append(rollupEvents, rollupEvent{
batchID: event.BatchID,
txHash: vLog.TxHash,
status: orm.RollupFinalized,
})
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
}
return l1Messages, relayedMessages, rollupEvents, nil
}
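The old watcher above filtered only for SentMessage, but parseBridgeEventLogs now dispatches on vLog.Topics[0] across the messenger and rollup events. One way to fetch all of them in a single eth_getLogs call is to OR the signatures inside Topics[0]. The sketch below is illustrative only, not code from this PR: fetchBridgeLogs and its parameters are made-up names; it merely reuses the bridge_abi signature constants referenced above.

package l1

import (
	"context"
	"math/big"

	geth "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"

	bridge_abi "scroll-tech/bridge/abi"
)

// fetchBridgeLogs is a hypothetical helper: one filter query whose Topics[0]
// lists several signatures matches logs for any of those events.
func fetchBridgeLogs(ctx context.Context, client *ethclient.Client, messenger, rollup common.Address, from, to int64) ([]types.Log, error) {
	query := geth.FilterQuery{
		FromBlock: big.NewInt(from), // inclusive
		ToBlock:   big.NewInt(to),   // inclusive
		Addresses: []common.Address{messenger, rollup},
		Topics: [][]common.Hash{{
			common.HexToHash(bridge_abi.SentMessageEventSignature),
			common.HexToHash(bridge_abi.RelayedMessageEventSignature),
			common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature),
			common.HexToHash(bridge_abi.CommitBatchEventSignature),
			common.HexToHash(bridge_abi.FinalizedBatchEventSignature),
		}},
	}
	return client.FilterLogs(ctx, query)
}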

bridge/l1/watcher_test.go Normal file

@@ -0,0 +1,29 @@
package l1
import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testStartWatcher(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(l1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher.Start()
defer watcher.Stop()
}


@@ -3,14 +3,11 @@ package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
)
@@ -30,29 +27,15 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
return nil, err
}
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L2MessengerABI failed", "err", err)
return nil, err
}
skippedOpcodes := make(map[string]struct{}, len(cfg.SkippedOpcodes))
for _, op := range cfg.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
proofGenerationFreq := cfg.ProofGenerationFreq
if proofGenerationFreq == 0 {
log.Warn("receive 0 proof_generation_freq, change to 1")
proofGenerationFreq = 1
}
relayer, err := NewLayer2Relayer(ctx, client, proofGenerationFreq, skippedOpcodes, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, proofGenerationFreq, skippedOpcodes, cfg.L2MessengerAddress, l2MessengerABI, orm)
l2Watcher, err := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
if err != nil {
return nil, err
}
return &Backend{
cfg: cfg,
@@ -86,8 +69,3 @@ func (l2 *Backend) APIs() []rpc.API {
},
}
}
// MockBlockResult for test case
func (l2 *Backend) MockBlockResult(blockResult *types.BlockResult) {
l2.l2Watcher.Send(blockResult)
}

bridge/l2/batch_proposer.go Normal file

@@ -0,0 +1,140 @@
package l2
import (
"fmt"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
)
type batchProposer struct {
mutex sync.Mutex
orm database.OrmFactory
batchTimeSec uint64
batchGasThreshold uint64
batchTxNumThreshold uint64
batchBlocksLimit uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
}
func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory) *batchProposer {
return &batchProposer{
mutex: sync.Mutex{},
orm: orm,
batchTimeSec: cfg.BatchTimeSec,
batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold,
batchBlocksLimit: cfg.BatchBlocksLimit,
proofGenerationFreq: cfg.ProofGenerationFreq,
skippedOpcodes: cfg.SkippedOpcodes,
}
}
func (w *batchProposer) tryProposeBatch() {
w.mutex.Lock()
defer w.mutex.Unlock()
blocks, err := w.orm.GetUnbatchedBlocks(
map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
)
if err != nil {
log.Error("failed to get unbatched blocks", "err", err)
return
}
if len(blocks) == 0 {
return
}
if blocks[0].GasUsed > w.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
if blocks[0].TxNum > w.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
var (
length = len(blocks)
gasUsed, txNum uint64
)
// add blocks into the batch until we would exceed the gas or tx-count threshold
for i, block := range blocks {
if (gasUsed+block.GasUsed > w.batchGasThreshold) || (txNum+block.TxNum > w.batchTxNumThreshold) {
blocks = blocks[:i]
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
}
// If we gathered too little gas but don't want to halt, check the first block in the batch:
// if it is not old enough yet, skip proposing the batch for now;
// otherwise propose the batch anyway.
if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
return
}
if err = w.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
}
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
dbTx, err := w.orm.Beginx()
if err != nil {
return err
}
var dbTxErr error
defer func() {
if dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
}
}()
var (
batchID string
startBlock = blocks[0]
endBlock = blocks[len(blocks)-1]
txNum, gasUsed uint64
blockIDs = make([]uint64, len(blocks))
)
for i, block := range blocks {
txNum += block.TxNum
gasUsed += block.GasUsed
blockIDs[i] = block.Number
}
batchID, dbTxErr = w.orm.NewBatchInDBTx(dbTx, startBlock, endBlock, startBlock.ParentHash, txNum, gasUsed)
if dbTxErr != nil {
return dbTxErr
}
if dbTxErr = w.orm.SetBatchIDForBlocksInDBTx(dbTx, blockIDs, batchID); dbTxErr != nil {
return dbTxErr
}
dbTxErr = dbTx.Commit()
return dbTxErr
}
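The packing rule in tryProposeBatch is greedy: blocks are taken in height order until adding the next block would push the batch over either the gas threshold or the tx-count threshold. Below is a self-contained restatement of just that rule; blocksThatFit and the sample numbers are illustrative, not code from this PR.

package main

import "fmt"

// blocksThatFit mirrors the greedy rule in tryProposeBatch: count how many
// blocks fit before either threshold would be exceeded.
func blocksThatFit(gas, txs []uint64, gasThreshold, txThreshold uint64) int {
	var gasUsed, txNum uint64
	for i := range gas {
		if gasUsed+gas[i] > gasThreshold || txNum+txs[i] > txThreshold {
			return i
		}
		gasUsed += gas[i]
		txNum += txs[i]
	}
	return len(gas)
}

func main() {
	// With a 3,000,000 gas threshold and a 135 tx threshold (the values used in
	// the test below), three 1,000,000-gas blocks fit but a fourth does not.
	fmt.Println(blocksThatFit(
		[]uint64{1_000_000, 1_000_000, 1_000_000, 1_000_000},
		[]uint64{10, 10, 10, 10},
		3_000_000, 135,
	)) // prints 3
}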


@@ -0,0 +1,62 @@
package l2
import (
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/common/utils"
)
func testBatchProposer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
trace2 := &types.BlockTrace{}
trace3 := &types.BlockTrace{}
data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace2)
assert.NoError(t, err)
data, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace3)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))
proposer := newBatchProposer(&config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, db)
proposer.tryProposeBatch()
infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, true, len(infos) == 0)
exist, err := db.BatchRecordExist(id)
assert.NoError(t, err)
assert.Equal(t, true, exist)
}


@@ -10,7 +10,8 @@ import (
"github.com/scroll-tech/go-ethereum/log"
)
func blockTraceIsValid(trace *types.BlockResult) bool {
//nolint:unused
func blockTraceIsValid(trace *types.BlockTrace) bool {
if trace == nil {
log.Warn("block trace is empty")
return false
@@ -22,6 +23,7 @@ func blockTraceIsValid(trace *types.BlockResult) bool {
return flag
}
//nolint:unused
func structLogResIsValid(txLogs []*types.StructLogRes) bool {
res := true
for i := 0; i < len(txLogs); i++ {
@@ -48,6 +50,7 @@ func structLogResIsValid(txLogs []*types.StructLogRes) bool {
return res
}
//nolint:unused
func codeIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
@@ -60,6 +63,7 @@ func codeIsValid(txLog *types.StructLogRes, n int) bool {
return true
}
//nolint:unused
func stateIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
@@ -73,7 +77,7 @@ func stateIsValid(txLog *types.StructLogRes, n int) bool {
}
// TraceHasUnsupportedOpcodes checks whether the trace contains unsupported opcodes
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockResult) bool {
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockTrace) bool {
if trace == nil {
return false
}

bridge/l2/l2_test.go Normal file

@@ -0,0 +1,95 @@
package l2
import (
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
// docker container handlers.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
redisImg docker.ImgInstance
// l2geth client
l2Cli *ethclient.Client
)
func setupEnv(t *testing.T) (err error) {
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.Persistence.DriverName)
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
// Create redis container.
redisImg = docker.NewTestRedisDocker(t)
cfg.DBConfig.Redis.URL = redisImg.Endpoint()
// Create l2geth client.
l2Cli, err = ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
return err
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
if redisImg != nil {
assert.NoError(t, redisImg.Stop())
}
}
func TestFunction(t *testing.T) {
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("testBatchProposer", testBatchProposer)
t.Cleanup(func() {
free(t)
})
}


@@ -2,18 +2,20 @@ package l2
import (
"context"
"errors"
"fmt"
"math/big"
"strconv"
"runtime"
"sync"
"time"
// not sure whether this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/database"
"scroll-tech/database/orm"
@@ -21,6 +23,7 @@ import (
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
)
// Layer2Relayer is responsible for
@@ -30,110 +33,121 @@ import (
// Actions are triggered by new head from layer 1 geth node.
// @todo It's better to be triggered by watcher.
type Layer2Relayer struct {
ctx context.Context
client *ethclient.Client
sender *sender.Sender
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
ctx context.Context
db database.OrmFactory
cfg *config.RelayerConfig
messageSender *sender.Sender
messageCh <-chan *sender.Confirmation
l1MessengerABI *abi.ABI
l1RollupABI *abi.ABI
// a list of processing message, indexed by layer2 hash
processingMessage map[string]string
rollupSender *sender.Sender
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
// a list of processing block, indexed by block height
processingBlock map[string]uint64
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// a list of processing proof, indexed by block height
processingProof map[string]uint64
// A list of processing batch commitment.
// key(string): confirmation ID, value(string): batch id.
processingCommitment sync.Map
// channel used to communicate with transaction sender
confirmationCh <-chan *sender.Confirmation
stopCh chan struct{}
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch id.
processingFinalization sync.Map
stopCh chan struct{}
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, proofGenFreq uint64, skippedOpcodes map[string]struct{}, l2ConfirmNum int64, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
if err != nil {
log.Error("Get L1MessengerABI failed", "err", err)
return nil, err
}
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
if err != nil {
log.Error("Get RollupABI failed", "err", err)
return nil, err
}
prv, err := crypto.HexToECDSA(cfg.PrivateKey)
if err != nil {
log.Error("Failed to import private key from config file")
return nil, err
}
func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
sender, err := sender.NewSender(ctx, cfg.SenderConfig, prv)
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
log.Error("Failed to create sender", "err", err)
log.Error("Failed to create messenger sender", "err", err)
return nil, err
}
rollupSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.RollupSenderPrivateKeys)
if err != nil {
log.Error("Failed to create rollup sender", "err", err)
return nil, err
}
return &Layer2Relayer{
ctx: ctx,
client: ethClient,
sender: sender,
db: db,
l1MessengerABI: l1MessengerABI,
l1RollupABI: l1RollupABI,
cfg: cfg,
proofGenerationFreq: proofGenFreq,
skippedOpcodes: skippedOpcodes,
processingMessage: map[string]string{},
processingBlock: map[string]uint64{},
processingProof: map[string]uint64{},
stopCh: make(chan struct{}),
confirmationCh: sender.ConfirmChan(),
ctx: ctx,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
l1MessengerABI: bridge_abi.L1MessengerMetaABI,
rollupSender: rollupSender,
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.RollupMetaABI,
cfg: cfg,
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}, nil
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer2Relayer) ProcessSavedEvents() {
func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
defer wg.Done()
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2UnprocessedMessages()
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
if len(msgs) == 0 {
return
}
msg := msgs[0]
// @todo add support to relay multiple messages
latestFinalizeHeight, err := r.db.GetLatestFinalizedBlock()
if err != nil {
log.Error("GetLatestFinalizedBlock failed", "err", err)
return
}
if latestFinalizeHeight < msg.Height {
// log.Warn("corresponding block not finalized", "status", status)
return
}
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
}
return
}
}
}
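ProcessSavedEvents now walks the pending messages in chunks whose size is bounded by the number of sender accounts, running one errgroup goroutine per message and stopping at the first chunk that reports an error. The pattern in isolation, with placeholder item and handler types that are not from this repository:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// processInChunks walks the slice in chunks of at most batchSize, runs one
// goroutine per item in the chunk, and stops at the first failing chunk.
func processInChunks(items []string, batchSize int, handle func(string) error) error {
	for size := 0; len(items) > 0; items = items[size:] {
		if size = len(items); size > batchSize {
			size = batchSize
		}
		var g errgroup.Group
		for _, it := range items[:size] {
			it := it // capture the loop variable for the goroutine
			g.Go(func() error { return handle(it) })
		}
		if err := g.Wait(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := processInChunks([]string{"a", "b", "c", "d", "e"}, 2, func(s string) error {
		fmt.Println("processed", s)
		return nil
	})
	fmt.Println("err:", err)
}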
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockNumber: big.NewInt(int64(msg.Height)),
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(0).SetUint64(index),
MerkleProof: make([]byte, 0),
}
sender := common.HexToAddress(msg.Sender)
from := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
@@ -145,222 +159,242 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", sender, target, value, fee, deadline, msgNonce, calldata, proof)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, fee, deadline, msgNonce, calldata, proof)
if err != nil {
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return
return err
}
hash, err := r.sender.SendTransaction(msg.Layer2Hash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil {
log.Error("Failed to send relayMessageWithProof tx to L1", "err", err)
return
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
log.Info("relayMessageWithProof to layer1", "layer2hash", msg.Layer2Hash, "txhash", hash.String())
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
}
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())
// save status in db
// @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.Layer2Hash, hash.String(), orm.MsgSubmitted)
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, orm.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "layer2hash", msg.Layer2Hash, "err", err)
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
r.processingMessage[msg.Layer2Hash] = msg.Layer2Hash
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
}
// ProcessPendingBlocks submit block data to layer rollup contract
func (r *Layer2Relayer) ProcessPendingBlocks() {
// blocks are sorted by height in increasing order
blocksInDB, err := r.db.GetPendingBlocks()
// ProcessPendingBatches submits batch data to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
defer wg.Done()
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches(1)
if err != nil {
log.Error("Failed to fetch pending L2 blocks", "err", err)
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
if len(blocksInDB) == 0 {
if len(batchesInDB) == 0 {
return
}
height := blocksInDB[0]
// @todo add support to relay multiple blocks
id := batchesInDB[0]
// @todo add support to relay multiple batches
// will fetch missing block result from l2geth
trace, err := r.getOrFetchBlockResultByHeight(height)
if err != nil {
log.Error("getOrFetchBlockResultByHeight failed",
"height", height,
"err", err,
)
batches, err := r.db.GetBlockBatches(map[string]interface{}{"id": id})
if err != nil || len(batches) == 0 {
log.Error("Failed to GetBlockBatches", "batch_id", id, "err", err)
return
}
if trace == nil {
return
}
parentHash, err := r.getOrFetchBlockHashByHeight(height - 1)
if err != nil {
log.Error("getOrFetchBlockHashByHeight for parent block failed",
"parent height", height-1,
"err", err,
)
return
}
if parentHash == nil {
log.Error("parent hash is empty",
"height", height,
"err", err,
)
batch := batches[0]
traces, err := r.db.GetBlockTraces(map[string]interface{}{"batch_id": id}, "ORDER BY number ASC")
if err != nil || len(traces) == 0 {
log.Error("Failed to GetBlockTraces", "batch_id", id, "err", err)
return
}
header := bridge_abi.IZKRollupBlockHeader{
BlockHash: trace.BlockTrace.Hash,
ParentHash: *parentHash,
BaseFee: trace.BlockTrace.BaseFee.ToInt(),
StateRoot: trace.StorageTrace.RootAfter,
BlockHeight: trace.BlockTrace.Number.ToInt().Uint64(),
GasUsed: 0,
Timestamp: trace.BlockTrace.Time,
ExtraData: make([]byte, 0),
layer2Batch := &bridge_abi.IZKRollupLayer2Batch{
BatchIndex: batch.Index,
ParentHash: common.HexToHash(batch.ParentHash),
Blocks: make([]bridge_abi.IZKRollupLayer2BlockHeader, len(traces)),
}
txns := make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.BlockTrace.Transactions))
for i, tx := range trace.BlockTrace.Transactions {
txns[i] = bridge_abi.IZKRollupLayer2Transaction{
Caller: tx.From,
Nonce: tx.Nonce,
Gas: tx.Gas,
GasPrice: tx.GasPrice.ToInt(),
Value: tx.Value.ToInt(),
Data: common.Hex2Bytes(tx.Data),
parentHash := common.HexToHash(batch.ParentHash)
for i, trace := range traces {
layer2Batch.Blocks[i] = bridge_abi.IZKRollupLayer2BlockHeader{
BlockHash: trace.Header.Hash(),
ParentHash: parentHash,
BaseFee: trace.Header.BaseFee,
StateRoot: trace.StorageTrace.RootAfter,
BlockHeight: trace.Header.Number.Uint64(),
GasUsed: 0,
Timestamp: trace.Header.Time,
ExtraData: make([]byte, 0),
Txs: make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.Transactions)),
}
if tx.To != nil {
txns[i].Target = *tx.To
for j, tx := range trace.Transactions {
layer2Batch.Blocks[i].Txs[j] = bridge_abi.IZKRollupLayer2Transaction{
Caller: tx.From,
Nonce: tx.Nonce,
Gas: tx.Gas,
GasPrice: tx.GasPrice.ToInt(),
Value: tx.Value.ToInt(),
Data: common.Hex2Bytes(tx.Data),
R: tx.R.ToInt(),
S: tx.S.ToInt(),
V: tx.V.ToInt().Uint64(),
}
if tx.To != nil {
layer2Batch.Blocks[i].Txs[j].Target = *tx.To
}
layer2Batch.Blocks[i].GasUsed += trace.ExecutionResults[j].Gas
}
header.GasUsed += trace.ExecutionResults[i].Gas
// for next iteration
parentHash = layer2Batch.Blocks[i].BlockHash
}
data, err := r.l1RollupABI.Pack("commitBlock", header, txns)
data, err := r.l1RollupABI.Pack("commitBatch", layer2Batch)
if err != nil {
log.Error("Failed to pack commitBlock", "height", height, "err", err)
log.Error("Failed to pack commitBatch", "id", id, "index", batch.Index, "err", err)
return
}
hash, err := r.sender.SendTransaction(strconv.FormatUint(height, 10), &r.cfg.RollupContractAddress, big.NewInt(0), data)
txID := id + "-commit"
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
if err != nil {
log.Error("Failed to send commitBlock tx to layer1 ", "height", height, "err", err)
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
}
return
}
log.Info("commitBlock in layer1", "height", height, "hash", hash)
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateRollupTxHashAndStatus(r.ctx, height, hash.String(), orm.RollupCommitting)
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateRollupTxHashAndStatus failed", "height", height, "err", err)
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
}
r.processingBlock[strconv.FormatUint(height, 10)] = height
r.processingCommitment.Store(txID, id)
}
// ProcessCommittedBlocks submit proof to layer rollup contract
func (r *Layer2Relayer) ProcessCommittedBlocks() {
// blocks are sorted by height in increasing order
blocksInDB, err := r.db.GetCommittedBlocks()
if err != nil {
log.Error("Failed to fetch committed L2 blocks", "err", err)
return
}
if len(blocksInDB) == 0 {
return
}
height := blocksInDB[0]
// @todo add support to relay multiple blocks
// ProcessCommittedBatches submits the proof to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
defer wg.Done()
status, err := r.db.GetBlockStatusByNumber(height)
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
log.Info("Skipping batches", "count", count)
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("GetBlockStatusByNumber failed", "height", height, "err", err)
log.Error("Failed to fetch committed L2 batches", "err", err)
return
}
if len(batches) == 0 {
return
}
id := batches[0]
// @todo add support to relay multiple batches
status, err := r.db.GetProvingStatusByID(id)
if err != nil {
log.Error("GetProvingStatusByID failed", "id", id, "err", err)
return
}
switch status {
case orm.BlockUnassigned, orm.BlockAssigned:
case orm.ProvingTaskUnassigned, orm.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case orm.BlockProved:
case orm.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case orm.BlockFailed, orm.BlockSkipped:
if err = r.db.UpdateRollupStatus(r.ctx, height, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "height", height, "err", err)
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
case orm.BlockVerified:
log.Info("Start to roll up zk proof", "height", height)
case orm.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "id", id)
success := false
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "height", height)
if err = r.db.UpdateRollupStatus(r.ctx, height, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "height", height, "err", err)
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "id", id)
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
}
}()
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByNumber(height)
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByID(id)
if err != nil {
log.Warn("fetch get proof by height failed", "height", height, "err", err)
log.Warn("fetch get proof by id failed", "id", id, "err", err)
return
}
if proofBuffer == nil || instanceBuffer == nil {
log.Warn("proof or instance not ready", "height", height)
log.Warn("proof or instance not ready", "id", id)
return
}
if len(proofBuffer)%32 != 0 {
log.Error("proof buffer has wrong length", "height", height, "length", len(proofBuffer))
log.Error("proof buffer has wrong length", "id", id, "length", len(proofBuffer))
return
}
if len(instanceBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "height", height, "length", len(instanceBuffer))
log.Warn("instance buffer has wrong length", "id", id, "length", len(instanceBuffer))
return
}
proof := bufferToUint256Le(proofBuffer)
instance := bufferToUint256Le(instanceBuffer)
proof := utils.BufferToUint256Le(proofBuffer)
instance := utils.BufferToUint256Le(instanceBuffer)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(id), proof, instance)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return
}
// it must in db
hash, err := r.db.GetHashByNumber(height)
txID := id + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash := &txHash
if err != nil {
log.Warn("fetch missing block result by height failed", "height", height, "err", err)
}
if hash == nil {
// only happen when trace validate failed
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("finalizeBatchWithProof in layer1 failed", "id", id, "err", err)
}
return
}
data, err := r.l1RollupABI.Pack("finalizeBlockWithProof", hash, proof, instance)
if err != nil {
log.Error("Pack finalizeBlockWithProof failed", err)
return
}
txHash, err := r.sender.SendTransaction(strconv.FormatUint(height, 10), &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash = &txHash
if err != nil {
log.Error("finalizeBlockWithProof in layer1 failed",
"height", height,
"err", err,
)
return
}
log.Info("finalizeBlockWithProof in layer1", "height", height, "hash", hash)
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndStatus(r.ctx, height, hash.String(), orm.RollupFinalizing)
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndStatus failed", "height", height, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
}
success = true
r.processingProof[strconv.FormatUint(height, 10)] = height
r.processingFinalization.Store(txID, id)
default:
log.Error("encounter unreachable case in ProcessCommittedBlocks",
log.Error("encounter unreachable case in ProcessCommittedBatches",
"block_status", status,
)
}
@@ -376,10 +410,15 @@ func (r *Layer2Relayer) Start() {
for {
select {
case <-ticker.C:
r.ProcessSavedEvents()
r.ProcessPendingBlocks()
r.ProcessCommittedBlocks()
case confirmation := <-r.confirmationCh:
var wg = sync.WaitGroup{}
wg.Add(3)
go r.ProcessSavedEvents(&wg)
go r.ProcessPendingBatches(&wg)
go r.ProcessCommittedBatches(&wg)
wg.Wait()
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
r.handleConfirmation(confirmation)
case <-r.stopCh:
return
@@ -393,130 +432,44 @@ func (r *Layer2Relayer) Stop() {
close(r.stopCh)
}
func (r *Layer2Relayer) getOrFetchBlockHashByHeight(height uint64) (*common.Hash, error) {
hash, err := r.db.GetHashByNumber(height)
if err != nil {
block, err := r.client.BlockByNumber(r.ctx, big.NewInt(int64(height)))
if err != nil {
return nil, err
}
x := block.Hash()
return &x, err
}
return hash, nil
}
func (r *Layer2Relayer) getOrFetchBlockResultByHeight(height uint64) (*types.BlockResult, error) {
tracesInDB, err := r.db.GetBlockResults(map[string]interface{}{"number": height})
if err != nil {
log.Warn("GetBlockResults failed", "height", height, "err", err)
return nil, err
}
if len(tracesInDB) == 0 {
return r.fetchMissingBlockResultByHeight(height)
}
return tracesInDB[0], nil
}
func (r *Layer2Relayer) fetchMissingBlockResultByHeight(height uint64) (*types.BlockResult, error) {
header, err := r.client.HeaderByNumber(r.ctx, big.NewInt(int64(height)))
if err != nil {
return nil, err
}
trace, err := r.client.GetBlockResultByHash(r.ctx, header.Hash())
if err != nil {
return nil, err
}
if blockTraceIsValid(trace) {
// skip verify for unsupported block
skip := false
if height%r.proofGenerationFreq != 0 {
log.Info("skip proof generation", "block", height)
skip = true
} else if TraceHasUnsupportedOpcodes(r.skippedOpcodes, trace) {
log.Info("block has unsupported opcodes, skip proof generation", "block", height)
skip = true
}
if skip {
if err = r.db.InsertBlockResultsWithStatus(r.ctx, []*types.BlockResult{trace}, orm.BlockSkipped); err != nil {
log.Error("failed to store missing blockResult", "err", err)
}
} else {
if err = r.db.InsertBlockResultsWithStatus(r.ctx, []*types.BlockResult{trace}, orm.BlockUnassigned); err != nil {
log.Error("failed to store missing blockResult", "err", err)
}
}
return trace, nil
}
return nil, nil
}
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
if !confirmation.IsSuccessful {
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
return
}
transactionType := "Unknown"
// check whether it is message relay transaction
if layer2Hash, ok := r.processingMessage[confirmation.ID]; ok {
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
transactionType = "MessageRelay"
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, layer2Hash, confirmation.TxHash.String(), orm.MsgConfirmed)
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "err", err)
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
}
delete(r.processingMessage, confirmation.ID)
r.processingMessage.Delete(confirmation.ID)
}
// check whether it is block commitment transaction
if blockHeight, ok := r.processingBlock[confirmation.ID]; ok {
transactionType = "BlockCommitment"
if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchCommitment"
// @todo handle db error
err := r.db.UpdateRollupTxHashAndStatus(r.ctx, blockHeight, confirmation.TxHash.String(), orm.RollupCommitted)
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
if err != nil {
log.Warn("UpdateRollupTxHashAndStatus failed", "err", err)
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
}
delete(r.processingBlock, confirmation.ID)
r.processingCommitment.Delete(confirmation.ID)
}
// check whether it is proof finalization transaction
if blockHeight, ok := r.processingProof[confirmation.ID]; ok {
if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndStatus(r.ctx, blockHeight, confirmation.TxHash.String(), orm.RollupFinalized)
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndStatus failed", "err", err)
}
delete(r.processingProof, confirmation.ID)
// try to delete block trace
err = r.db.DeleteTraceByNumber(blockHeight)
if err != nil {
log.Warn("DeleteTraceByNumber failed", "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
}
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
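Because confirmations arrive on their own channels while the submit loops run concurrently, the in-flight bookkeeping switched from plain maps to sync.Map; Load returns an untyped value, which is why the code above asserts msgHash.(string) and batchID.(string). A minimal stand-alone illustration of that Store/Load/Delete lifecycle (the IDs below are made up):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var processing sync.Map

	// sender side: remember which batch a confirmation ID belongs to
	processing.Store("batch-42-commit", "batch-42")

	// confirmation side: look it up, act on it, then drop the entry
	if v, ok := processing.Load("batch-42-commit"); ok {
		batchID := v.(string) // values come back as interface{}, hence the assertion
		fmt.Println("confirmed commit tx for", batchID)
		processing.Delete("batch-42-commit")
	}
}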
//nolint:unused
func bufferToUint256Be(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
buffer256[i] = big.NewInt(0)
for j := 0; j < 32; j++ {
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
}
}
return buffer256
}
func bufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
v := big.NewInt(0)
shft := big.NewInt(1)
for j := 0; j < 32; j++ {
v = new(big.Int).Add(v, new(big.Int).Mul(shft, big.NewInt(int64(buffer[i*32+j]))))
shft = new(big.Int).Mul(shft, big.NewInt(256))
}
buffer256[i] = v
}
return buffer256
}
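bufferToUint256Le splits the proof/instance buffer into 32-byte words and interprets each word as a little-endian uint256 (the call sites above now go through utils.BufferToUint256Le). The same result can be obtained by reversing each word and letting big.Int.SetBytes parse it big-endian; a small self-contained check with an illustrative input:

package main

import (
	"fmt"
	"math/big"
)

// bufferToUint256Le reproduces the helper above only to show the
// little-endian interpretation on a concrete word.
func bufferToUint256Le(buffer []byte) []*big.Int {
	out := make([]*big.Int, len(buffer)/32)
	for i := 0; i < len(buffer)/32; i++ {
		// reverse the 32-byte word, then let big.Int parse it as big-endian
		word := make([]byte, 32)
		for j := 0; j < 32; j++ {
			word[31-j] = buffer[i*32+j]
		}
		out[i] = new(big.Int).SetBytes(word)
	}
	return out
}

func main() {
	// word = 0x02 0x01 0x00 ... 0x00 (little-endian) => 2 + 1*256 = 258
	word := make([]byte, 32)
	word[0], word[1] = 0x02, 0x01
	fmt.Println(bufferToUint256Le(word)[0]) // prints 258
}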


@@ -1,29 +1,24 @@
package l2_test
package l2
import (
"context"
"encoding/json"
"math/big"
"os"
"sync"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/mock"
)
var (
templateLayer2Message = []*orm.Layer2Message{
templateL2Message = []*orm.L2Message{
{
Nonce: 1,
Height: 1,
@@ -37,198 +32,242 @@ var (
Layer2Hash: "hash0",
},
}
l1Docker docker.ImgInstance
l2Docker docker.ImgInstance
dbDocker docker.ImgInstance
)
func setupEnv(t *testing.T) {
l1Docker = mock.NewTestL1Docker(t, TEST_CONFIG)
l2Docker = mock.NewTestL2Docker(t, TEST_CONFIG)
dbDocker = mock.GetDbDocker(t, TEST_CONFIG)
func testCreateNewRelayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
relayer.Start()
}
func TestRelayerFunction(t *testing.T) {
// Setup
setupEnv(t)
func testL2RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
t.Run("TestCreateNewRelayer", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
err = db.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)
db, err := database.NewOrmFactory(TEST_CONFIG.DB_CONFIG)
assert.NoError(t, err)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
relayer.Start()
defer relayer.Stop()
})
t.Run("TestL2RelayerProcessSaveEvents", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
err = db.SaveLayer2Messages(context.Background(), templateLayer2Message)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 3,
Status: orm.RollupFinalized,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
traces := []*types.BlockTrace{
{
Header: &types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height)),
},
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupFinalized)
assert.NoError(t, err)
relayer.ProcessSavedEvents()
msg, err := db.GetLayer2MessageByNonce(templateLayer2Message[0].Nonce)
assert.NoError(t, err)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
})
t.Run("TestL2RelayerProcessPendingBlocks", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
// this blockresult has number of 0x4, need to change it to match the testcase
// In this testcase scenario, db will store two blocks with height 0x4 and 0x3
var results []*types.BlockResult
templateBlockResult, err := os.ReadFile("../../common/testdata/blockResult_relayer_parent.json")
assert.NoError(t, err)
blockResult := &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
results = append(results, blockResult)
templateBlockResult, err = os.ReadFile("../../common/testdata/blockResult_relayer.json")
assert.NoError(t, err)
blockResult = &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
results = append(results, blockResult)
err = db.InsertBlockResultsWithStatus(context.Background(), results, orm.BlockUnassigned)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 4,
Status: 1,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
},
{
Header: &types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
},
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
},
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: templateL2Message[0].Height},
&orm.BlockInfo{Number: templateL2Message[0].Height + 1},
"0f", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
templateL2Message[0].Height,
templateL2Message[0].Height + 1}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessSavedEvents(&wg)
wg.Wait()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
}
func testL2RelayerProcessPendingBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
// this blockresult has number of 0x4, need to change it to match the testcase
// In this testcase scenario, db will store two blocks with height 0x4 and 0x3
var traces []*types.BlockTrace
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
blockTrace := &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
traces = append(traces, blockTrace)
templateBlockTrace, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
blockTrace = &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
traces = append(traces, blockTrace)
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{Number: traces[0].Header.Number.Uint64()},
&orm.BlockInfo{Number: traces[1].Header.Number.Uint64()},
"ff", 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[1].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
// assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessPendingBatches(&wg)
wg.Wait()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
}
func testL2RelayerProcessCommittedBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupCommitted)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
}
func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupPending)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
relayer.ProcessPendingBlocks()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(uint64(blocks[0].Number))
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
})
t.Run("TestL2RelayerProcessCommittedBlocks", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
templateBlockResult, err := os.ReadFile("../../common/testdata/blockResult_relayer.json")
assert.NoError(t, err)
blockResult := &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
err = db.InsertBlockResultsWithStatus(context.Background(), []*types.BlockResult{blockResult}, orm.BlockVerified)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 4,
Status: 1,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
},
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupCommitted)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tStateProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByNumber(context.Background(), uint64(blocks[0].Number), tProof, tStateProof, 100)
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
relayer.ProcessCommittedBlocks()
status, err := db.GetRollupStatus(uint64(blocks[0].Number))
err = db.UpdateProvingStatus(batchID, provingStatus)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
})
// Teardown
t.Cleanup(func() {
assert.NoError(t, l1Docker.Stop())
assert.NoError(t, l2Docker.Stop())
assert.NoError(t, dbDocker.Stop())
})
return batchID
}
skipped := []string{
createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
createBatch(orm.RollupPending, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
}
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizationSkipped, status)
}
for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
}
}


@@ -4,9 +4,10 @@ import (
"context"
"fmt"
"math/big"
"reflect"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -14,20 +15,20 @@ import (
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/utils"
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
)
const (
// BufferSize for the BlockResult channel
BufferSize = 16
// BlockResultCacheSize for the latest handled blockresults in memory.
BlockResultCacheSize = 64
// keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
sentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
)
type relayedMessage struct {
msgHash common.Hash
txHash common.Hash
isSuccessful bool
}
// WatcherClient provides APIs that allow others to subscribe to various events from l2geth
type WatcherClient struct {
@@ -38,76 +39,133 @@ type WatcherClient struct {
orm database.OrmFactory
confirmations uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
messengerAddress common.Address
messengerABI *abi.ABI
confirmations uint64
messengerAddress common.Address
messengerABI *abi.ABI
// The height up to which the watcher has retrieved event logs
processedMsgHeight uint64
stopped uint64
stopCh chan struct{}
batchProposer *batchProposer
}
// NewL2WatcherClient takes an l2geth client and returns a new WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, proofGenFreq uint64, skippedOpcodes map[string]struct{}, messengerAddress common.Address, messengerABI *abi.ABI, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) (*WatcherClient, error) {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
}
return &WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
proofGenerationFreq: proofGenFreq,
skippedOpcodes: skippedOpcodes,
messengerAddress: messengerAddress,
messengerABI: messengerABI,
stopCh: make(chan struct{}),
stopped: 0,
watcher := &WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2MessengerMetaABI,
stopCh: make(chan struct{}),
stopped: 0,
batchProposer: newBatchProposer(bpCfg, orm),
}
// Init the cache; if the traces in the cache have expired, reset it.
if err = watcher.initCache(ctx); err != nil {
log.Error("failed to init cache in l2 watcher")
return nil, err
}
return watcher, nil
}
// Start the Listening process
func (w *WatcherClient) Start() {
go func() {
if w.orm == nil {
panic("must run L2 watcher with DB")
if reflect.ValueOf(w.orm).IsNil() {
panic("must run L2 watcher with Persistence")
}
// trigger by timer
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
ctx, cancel := context.WithCancel(w.ctx)
for {
select {
case <-ticker.C:
// get current height
number, err := w.BlockNumber(w.ctx)
if err != nil {
log.Error("Failed to get_BlockNumber", "err", err)
continue
}
if err = w.tryFetchRunningMissingBlocks(w.ctx, number); err != nil {
log.Error("Failed to fetchRunningMissingBlocks", "err", err)
}
// trace fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
// @todo handle error
err = w.fetchContractEvent(number)
if err != nil {
log.Error("Failed to fetchContractEvent", "err", err)
}
for {
select {
case <-ctx.Done():
return
case <-w.stopCh:
return
case <-ticker.C:
// get current height
number, err := w.BlockNumber(ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
w.tryFetchRunningMissingBlocks(ctx, number)
}
}
}
}(ctx)
// event fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
// get current height
number, err := w.BlockNumber(ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
w.FetchContractEvent(number)
}
}
}(ctx)
// batch proposer loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
w.batchProposer.tryProposeBatch()
}
}
}(ctx)
<-w.stopCh
cancel()
}()
}
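The three goroutines above share one skeleton: a ticker-driven loop that exits when the context is cancelled. A self-contained sketch of that skeleton; loopWithTicker is a hypothetical helper used only for illustration:
package main
import (
	"context"
	"time"
)
// loopWithTicker captures the pattern shared by the trace-fetcher,
// event-fetcher and batch-proposer goroutines: run `work` on every tick
// until the context is cancelled.
func loopWithTicker(ctx context.Context, period time.Duration, work func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			work()
		}
	}
}
func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	loopWithTicker(ctx, 3*time.Second, func() { /* fetch or propose here */ })
}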
@@ -116,150 +174,202 @@ func (w *WatcherClient) Stop() {
w.stopCh <- struct{}{}
}
const blockTracesFetchLimit = uint64(10)
// try to fetch missing blocks if inconsistent
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, backTrackFrom uint64) error {
// Get the newest block in the DB; there must be blocks at that time.
// Don't use "block_result" table "content" column's BlockTrace.Number,
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// Get the newest block in Persistence; there must be blocks at that time.
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetBlockResultsLatestHeight()
heightInDB, err := w.orm.GetBlockTracesLatestHeight()
if err != nil {
return fmt.Errorf("Failed to GetBlockResults in DB: %v", err)
log.Error("failed to GetBlockTracesLatestHeight", "err", err)
return
}
backTrackTo := uint64(0)
// Can't get trace from genesis block, so the default start number is 1.
var from = uint64(1)
if heightInDB > 0 {
backTrackTo = uint64(heightInDB)
from = uint64(heightInDB) + 1
}
// start backtracking
heights := []uint64{}
toSkipped := []*types.BlockResult{}
toUnassigned := []*types.BlockResult{}
for ; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1
var (
header *types.Header
trace *types.BlockResult
)
for number := backTrackFrom; number > backTrackTo; number-- {
header, err = w.HeaderByNumber(ctx, big.NewInt(int64(number)))
if err != nil {
return fmt.Errorf("Failed to get HeaderByNumber: %v. number: %v", err, number)
if to > blockHeight {
to = blockHeight
}
trace, err = w.GetBlockResultByHash(ctx, header.Hash())
if err != nil {
return fmt.Errorf("Failed to GetBlockResultByHash: %v. number: %v", err, number)
}
log.Info("Retrieved block result", "height", header.Number, "hash", header.Hash())
heights = append(heights, number)
if number%w.proofGenerationFreq != 0 {
toSkipped = append(toSkipped, trace)
} else if TraceHasUnsupportedOpcodes(w.skippedOpcodes, trace) {
toSkipped = append(toSkipped, trace)
} else {
toUnassigned = append(toUnassigned, trace)
// Get block traces and insert into db.
if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
}
}
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var traces []*types.BlockTrace
for number := from; number <= to; number++ {
log.Debug("retrieving block trace", "height", number)
trace, err2 := w.GetBlockTraceByNumber(ctx, big.NewInt(int64(number)))
if err2 != nil {
return fmt.Errorf("failed to GetBlockResultByHash: %v. number: %v", err2, number)
}
log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())
traces = append(traces, trace)
}
if len(traces) > 0 {
if err := w.orm.InsertBlockTraces(traces); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
}
if len(heights) > 0 {
if err = w.orm.InsertPendingBlocks(ctx, heights); err != nil {
return fmt.Errorf("Failed to batch insert PendingBlocks: %v", err)
}
}
if len(toSkipped) > 0 {
if err = w.orm.InsertBlockResultsWithStatus(ctx, toSkipped, orm.BlockSkipped); err != nil {
return fmt.Errorf("failed to batch insert PendingBlocks: %v", err)
}
}
if len(toUnassigned) > 0 {
if err = w.orm.InsertBlockResultsWithStatus(ctx, toUnassigned, orm.BlockUnassigned); err != nil {
return fmt.Errorf("failed to batch insert PendingBlocks: %v", err)
}
}
return nil
}
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in Persistence
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(w.confirmations)
toBlock := int64(blockHeight)
if toBlock < fromBlock {
return nil
}
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = common.HexToHash(sentMessageEventSignature)
if to > toBlock {
to = toBlock
}
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
return nil
}
log.Info("Received new L2 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
eventLogs, err := parseBridgeEventLogs(logs, w.messengerABI)
if err != nil {
log.Error("Failed to parse emitted event log", "err", err)
return err
}
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("failed to get event logs", "err", err)
return
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
err = w.orm.SaveLayer2Messages(w.ctx, eventLogs)
if err == nil {
w.processedMsgHeight = uint64(toBlock)
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
}
}
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err)
return
}
w.processedMsgHeight = uint64(to)
}
return err
}
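FetchContractEvent scans the range (processedMsgHeight, blockHeight] in windows of contractEventsBlocksFetchLimit blocks; both FilterQuery bounds are inclusive, the last window is clamped, and processedMsgHeight only advances after a window has been fully handled. A small sketch of the same inclusive windowing arithmetic; forEachWindow and handle are hypothetical names used only for illustration:
package main
import "fmt"
// forEachWindow walks [fromBlock, toBlock] in inclusive windows of `limit`
// blocks, clamping the final window, mirroring the loop structure above.
func forEachWindow(fromBlock, toBlock, limit int64, handle func(from, to int64) error) error {
	for from := fromBlock; from <= toBlock; from += limit {
		to := from + limit - 1
		if to > toBlock {
			to = toBlock
		}
		if err := handle(from, to); err != nil {
			return err
		}
	}
	return nil
}
func main() {
	// e.g. blocks 101..125 with a window of 10 -> [101,110] [111,120] [121,125]
	_ = forEachWindow(101, 125, 10, func(from, to int64) error {
		fmt.Println(from, to)
		return nil
	})
}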
func parseBridgeEventLogs(logs []types.Log, messengerABI *abi.ABI) ([]*orm.Layer2Message, error) {
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
// Need to use the contract ABI to parse the event log
// Can only be tested after we have our contracts set up
var parsedlogs []*orm.Layer2Message
var l2Messages []*orm.L2Message
var relayedMessages []relayedMessage
for _, vLog := range logs {
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
err := messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Error("Failed to unpack layer2 SentMessage event", "err", err)
return parsedlogs, err
err := w.messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err)
return l2Messages, relayedMessages, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
l2Messages = append(l2Messages, &orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
// MsgHash is in topics[1]
event.MsgHash = common.HexToHash(vLog.Topics[1].String())
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MsgHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
parsedlogs = append(parsedlogs, &orm.Layer2Message{
Nonce: event.MessageNonce.Uint64(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
}
return parsedlogs, nil
return l2Messages, relayedMessages, nil
}


@@ -1,33 +1,21 @@
package l2
import (
"errors"
"context"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/scroll-tech/go-ethereum/core/types"
)
// WatcherAPI watcher api service
type WatcherAPI interface {
ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error)
GetTracesByBatchIndex(ctx context.Context, index uint64) ([]*types.BlockTrace, error)
}
// ReplayBlockResultByHash is a temporary interface for easy testing.
func (r *WatcherClient) ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error) {
orm := r.orm
params := make(map[string]interface{})
if number, ok := blockNrOrHash.Number(); ok {
params["number"] = int64(number)
}
if hash, ok := blockNrOrHash.Hash(); ok {
params["hash"] = hash.String()
}
if len(params) == 0 {
return false, errors.New("empty params")
}
trace, err := orm.GetBlockResults(params)
// GetTracesByBatchIndex gets traces by batch_id.
func (w *WatcherClient) GetTracesByBatchIndex(ctx context.Context, index uint64) ([]*types.BlockTrace, error) {
id, err := w.orm.GetBatchIDByIndex(index)
if err != nil {
return false, err
return nil, err
}
r.Send(&trace[0])
return true, nil
return w.orm.GetBlockTraces(map[string]interface{}{"batch_id": id})
}

bridge/l2/watcher_init.go Normal file (+121)

@@ -0,0 +1,121 @@
package l2
import (
"context"
"fmt"
"math/big"
"runtime"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"scroll-tech/database/cache"
"scroll-tech/database/orm"
)
func (w *WatcherClient) initCache(ctx context.Context) error {
var (
// Use at most half of the system threads.
parallel = (runtime.GOMAXPROCS(0) + 1) / 2
db = w.orm
)
// Fill block traces of unassigned batches.
for {
batches, err := db.GetBlockBatches(
map[string]interface{}{"proving_status": orm.ProvingTaskUnassigned},
fmt.Sprintf("ORDER BY index ASC LIMIT %d;", parallel),
)
if err != nil {
log.Error("failed to get block batch", "err", err)
return err
}
if len(batches) == 0 {
break
}
var eg errgroup.Group
for _, batch := range batches {
batch := batch
eg.Go(func() error {
return w.fillTraceByNumber(ctx, batch.StartBlockNumber, batch.EndBlockNumber)
})
}
if err = eg.Wait(); err != nil {
return err
}
}
// Fill block traces of assigned and currently-proving batches into the cache.
ids, err := w.orm.GetAssignedBatchIDs()
if err != nil {
return err
}
for _, id := range ids {
err = w.fillTraceByID(ctx, id)
if err != nil {
log.Error("failed to fill traces by id", "id", id, "err", err)
return err
}
}
// Fill pending block traces into cache.
for {
ids, err = w.orm.GetPendingBatches(uint64(parallel))
if err != nil {
log.Error("failed to get pending batch ids", "err", err)
return err
}
if len(ids) == 0 {
log.Info("L2 WatcherClient initCache done")
return nil
}
for _, id := range ids {
err = w.fillTraceByID(ctx, id)
if err != nil {
log.Error("failed to fill traces by id", "id", id, "err", err)
return err
}
}
}
}
// fillTraceByID fills block traces by batch id.
func (w *WatcherClient) fillTraceByID(ctx context.Context, id string) error {
batches, err := w.orm.GetBlockBatches(map[string]interface{}{"id": id})
if err != nil || len(batches) == 0 {
return err
}
batch := batches[0]
err = w.fillTraceByNumber(ctx, batch.StartBlockNumber, batch.EndBlockNumber)
if err != nil {
return err
}
return nil
}
func (w *WatcherClient) fillTraceByNumber(ctx context.Context, start, end uint64) error {
var (
rdb = w.orm.(cache.Cache)
client = w.Client
)
for height := start; height <= end; height++ {
number := big.NewInt(0).SetUint64(height)
exist, err := rdb.ExistTrace(ctx, number)
if err != nil {
return err
}
if exist {
continue
}
trace, err := client.GetBlockTraceByNumber(ctx, number)
if err != nil {
return err
}
err = rdb.SetBlockTrace(ctx, trace)
if err != nil {
return err
}
}
return nil
}
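initCache warms the trace cache in chunks of roughly half the available threads, fanning each chunk out with errgroup so one slow batch does not serialize the rest. A minimal sketch of that fan-out, with warmChunk and fill as hypothetical stand-ins for the repository's fillTraceByID/fillTraceByNumber:
package main
import (
	"fmt"
	"runtime"
	"golang.org/x/sync/errgroup"
)
// warmChunk fans a chunk of batch IDs out to errgroup workers, mirroring the
// fan-out used in initCache above.
func warmChunk(ids []string, fill func(id string) error) error {
	var eg errgroup.Group
	for _, id := range ids {
		id := id // capture loop variable (pre-Go 1.22 semantics)
		eg.Go(func() error { return fill(id) })
	}
	return eg.Wait()
}
func main() {
	parallel := (runtime.GOMAXPROCS(0) + 1) / 2 // at most about half of the system threads
	fmt.Println("chunk size:", parallel)
	_ = warmChunk([]string{"batch-1", "batch-2"}, func(id string) error {
		fmt.Println("filling", id)
		return nil
	})
}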


@@ -1,319 +1,207 @@
package l2_test
package l2
import (
"context"
"crypto/ecdsa"
"encoding/json"
"math/big"
"os"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/l2"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/database"
db_config "scroll-tech/database"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
)
const TEST_BUFFER = 500
var TEST_CONFIG = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8571,
},
L2GethTestConfig: mock.L2GethTestConfig{
HPort: 0,
WPort: 8567,
},
DbTestconfig: mock.DbTestconfig{
DbName: "testwatcher_db",
DbPort: 5438,
DB_CONFIG: &db_config.DBConfig{
DriverName: utils.GetEnvWithDefault("TEST_DB_DRIVER", "postgres"),
DSN: utils.GetEnvWithDefault("TEST_DB_DSN", "postgres://postgres:123456@localhost:5438/testwatcher_db?sslmode=disable"),
},
},
}
var (
// previousHeight stores the previous chain height
previousHeight uint64
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
l2Backend *l2.Backend
)
func setenv(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
func testCreateNewWatcherAndStop(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
l1gethImg = mock.NewTestL1Docker(t, TEST_CONFIG)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
l2Backend, l2gethImg, dbImg = mock.L2gethDocker(t, cfg, TEST_CONFIG)
}
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
func TestWatcherFunction(t *testing.T) {
setenv(t)
t.Run("TestL2Backend", func(t *testing.T) {
err := l2Backend.Start()
assert.NoError(t, err)
l2Backend.Stop()
})
t.Run("TestCreateNewWatcherAndStop", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
l2cfg := cfg.L2Config
rc, err := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
assert.NoError(t, err)
rc.Start()
defer rc.Stop()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
messengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
assert.NoError(t, err)
l2db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
proofGenerationFreq := cfg.L2Config.ProofGenerationFreq
if proofGenerationFreq == 0 {
proofGenerationFreq = 1
}
rc := l2.NewL2WatcherClient(context.Background(), client, cfg.L2Config.Confirmations, proofGenerationFreq, skippedOpcodes, cfg.L2Config.L2MessengerAddress, messengerABI, l2db)
rc.Start()
// Create several transactions and commit to block
numTransactions := 3
for i := 0; i < numTransactions; i++ {
tx := mock.SendTxToL2Client(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
// wait for mining
_, err = bind.WaitMined(context.Background(), client, tx)
assert.NoError(t, err)
}
<-time.After(10 * time.Second)
blockNum, err := client.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
rc.Stop()
l2db.Close()
})
t.Run("TestMonitorBridgeContract", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
t.Log("confirmations:", cfg.L2Config.Confirmations)
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
previousHeight, err = client.BlockNumber(context.Background())
assert.NoError(t, err)
auth := prepareAuth(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridge(auth, client)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), client, tx)
assert.NoError(t, err)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
rc := prepareRelayerClient(client, db, address)
rc.Start()
// Call mock_bridge instance sendMessage to trigger emit events
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, err := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, err)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
//extra block mined
addr = common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// wait for dealing time
<-time.After(6 * time.Second)
var latestHeight uint64
latestHeight, err = client.BlockNumber(context.Background())
assert.NoError(t, err)
t.Log("Latest height is", latestHeight)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2UnprocessedMessages()
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
rc.Stop()
db.Close()
})
t.Run("TestFetchMultipleSentMessageInOneBlock", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
previousHeight, err := client.BlockNumber(context.Background()) // shallow the global previousHeight
assert.NoError(t, err)
auth := prepareAuth(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
_, trx, instance, err := mock_bridge.DeployMockBridge(auth, client)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), client, trx)
assert.NoError(t, err)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
rc := prepareRelayerClient(client, db, address)
rc.Start()
// Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4
var tx *types.Transaction
for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
}
receipt, err := bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// wait for dealing time
<-time.After(6 * time.Second)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2UnprocessedMessages()
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
rc.Stop()
db.Close()
})
// Teardown
t.Cleanup(func() {
assert.NoError(t, l1gethImg.Stop())
assert.NoError(t, l2gethImg.Stop())
assert.NoError(t, dbImg.Stop())
})
}
func TestTraceHasUnsupportedOpcodes(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = 0
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
assert.NoError(t, err)
delegateTrace, err := os.ReadFile("../../common/testdata/blockResult_delegate.json")
assert.NoError(t, err)
trace := &types.BlockResult{}
assert.NoError(t, json.Unmarshal(delegateTrace, &trace))
unsupportedOpcodes := make(map[string]struct{})
for _, val := range cfg.L2Config.SkippedOpcodes {
unsupportedOpcodes[val] = struct{}{}
// Create several transactions and commit to block
numTransactions := 3
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for i := 0; i < numTransactions; i++ {
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil)
assert.NoError(t, err)
<-newSender.ConfirmChan()
}
blockNum, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}
func prepareRelayerClient(client *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *l2.WatcherClient {
messengerABI, _ := bridge_abi.L1MessengerMetaData.GetAbi()
return l2.NewL2WatcherClient(context.Background(), client, 0, 1, map[string]struct{}{}, contractAddr, messengerABI, db)
func testMonitorBridgeContract(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
rc, err := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
assert.NoError(t, err)
rc.Start()
defer rc.Stop()
// Call mock_bridge instance sendMessage to trigger emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// wait for dealing time
<-time.After(6 * time.Second)
var latestHeight uint64
latestHeight, err = l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
t.Log("Latest height is", latestHeight)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("Height in Persistence is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
func prepareAuth(t *testing.T, client *ethclient.Client, private string) *bind.TransactOpts {
privateKey, err := crypto.HexToECDSA(private)
func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
publicKey := privateKey.Public()
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
assert.True(t, ok)
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
previousHeight, err := l2Cli.BlockNumber(context.Background()) // shallow the global previousHeight
assert.NoError(t, err)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
rc, err := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
assert.NoError(t, err)
rc.Start()
defer rc.Stop()
// Call mock_bridge instance sendMessage to trigger emit events multiple times
numTransactions := 4
var tx *types.Transaction
for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
}
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// extra block mined
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// wait for dealing time
<-time.After(6 * time.Second)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) (*WatcherClient, error) {
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(53077))
assert.NoError(t, err)
auth.Nonce = big.NewInt(int64(nonce))
auth.Value = big.NewInt(0) // in wei
auth.GasLimit = uint64(30000000) // in units
auth.GasPrice, err = client.SuggestGasPrice(context.Background())
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
auth.GasPrice, err = l2Cli.SuggestGasPrice(context.Background())
assert.NoError(t, err)
return auth
}


@@ -1,251 +0,0 @@
package mock
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/binary"
"encoding/json"
"io"
"math/big"
"net"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/gorilla/websocket"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/common/message"
"scroll-tech/coordinator"
coordinator_config "scroll-tech/coordinator/config"
bridge_config "scroll-tech/bridge/config"
"scroll-tech/bridge/l2"
"scroll-tech/common/docker"
docker_db "scroll-tech/database/docker"
)
// PerformHandshake sets up a websocket client to connect to the roller manager.
func PerformHandshake(t *testing.T, c *websocket.Conn) {
// Try to perform handshake
pk, sk := generateKeyPair()
authMsg := &message.AuthMessage{
Identity: message.Identity{
Name: "testRoller",
Timestamp: time.Now().UnixNano(),
PublicKey: common.Bytes2Hex(pk),
},
Signature: "",
}
hash, err := authMsg.Identity.Hash()
assert.NoError(t, err)
sig, err := secp256k1.Sign(hash, sk)
assert.NoError(t, err)
authMsg.Signature = common.Bytes2Hex(sig)
b, err := json.Marshal(authMsg)
assert.NoError(t, err)
msg := &message.Msg{
Type: message.Register,
Payload: b,
}
b, err = json.Marshal(msg)
assert.NoError(t, err)
assert.NoError(t, c.WriteMessage(websocket.BinaryMessage, b))
}
func generateKeyPair() (pubkey, privkey []byte) {
key, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)
if err != nil {
panic(err)
}
pubkey = elliptic.Marshal(secp256k1.S256(), key.X, key.Y)
privkey = make([]byte, 32)
blob := key.D.Bytes()
copy(privkey[32-len(blob):], blob)
return pubkey, privkey
}
// SetupMockVerifier sets up a mocked verifier for a test case.
func SetupMockVerifier(t *testing.T, verifierEndpoint string) {
err := os.RemoveAll(verifierEndpoint)
assert.NoError(t, err)
l, err := net.Listen("unix", verifierEndpoint)
assert.NoError(t, err)
go func() {
conn, err := l.Accept()
assert.NoError(t, err)
// Simply read all incoming messages and send a true boolean straight back.
for {
// Read length
buf := make([]byte, 4)
_, err = io.ReadFull(conn, buf)
assert.NoError(t, err)
// Read message
msgLength := binary.LittleEndian.Uint64(buf)
buf = make([]byte, msgLength)
_, err = io.ReadFull(conn, buf)
assert.NoError(t, err)
// Return boolean
buf = []byte{1}
_, err = conn.Write(buf)
assert.NoError(t, err)
}
}()
}
// L1GethTestConfig is the http and web socket port of l1geth docker
type L1GethTestConfig struct {
HPort int
WPort int
}
// L2GethTestConfig is the http and web socket port of l2geth docker
type L2GethTestConfig struct {
HPort int
WPort int
}
// DbTestconfig is the test config of database docker
type DbTestconfig struct {
DbName string
DbPort int
DB_CONFIG *database.DBConfig
}
// TestConfig is the config for test
type TestConfig struct {
L1GethTestConfig
L2GethTestConfig
DbTestconfig
}
// NewTestL1Docker starts and returns l1geth docker
func NewTestL1Docker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_geth := docker.NewImgGeth(t, "scroll_l1geth", "", "", tcfg.L1GethTestConfig.HPort, tcfg.L1GethTestConfig.WPort)
assert.NoError(t, img_geth.Start())
return img_geth
}
// NewTestL2Docker starts and returns l2geth docker
func NewTestL2Docker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_geth := docker.NewImgGeth(t, "scroll_l2geth", "", "", tcfg.L2GethTestConfig.HPort, tcfg.L2GethTestConfig.WPort)
assert.NoError(t, img_geth.Start())
return img_geth
}
// GetDbDocker starts and returns database docker
func GetDbDocker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_db := docker_db.NewImgDB(t, "postgres", "123456", tcfg.DbName, tcfg.DbPort)
assert.NoError(t, img_db.Start())
return img_db
}
// L2gethDocker return mock l2geth client created with docker for test
func L2gethDocker(t *testing.T, cfg *bridge_config.Config, tcfg *TestConfig) (*l2.Backend, docker.ImgInstance, docker.ImgInstance) {
// initialize l2geth docker image
img_geth := NewTestL2Docker(t, tcfg)
cfg.L2Config.Endpoint = img_geth.Endpoint()
// initialize db docker image
img_db := GetDbDocker(t, tcfg)
db, err := database.NewOrmFactory(&database.DBConfig{
DriverName: "postgres",
DSN: img_db.Endpoint(),
})
assert.NoError(t, err)
client, err := l2.New(context.Background(), cfg.L2Config, db)
assert.NoError(t, err)
return client, img_geth, img_db
}
// SetupRollerManager return coordinator.Manager for testcase
func SetupRollerManager(t *testing.T, cfg *coordinator_config.Config, orm database.OrmFactory) *coordinator.Manager {
// Load config file.
ctx := context.Background()
SetupMockVerifier(t, cfg.RollerManagerConfig.VerifierEndpoint)
rollerManager, err := coordinator.New(ctx, cfg.RollerManagerConfig, orm)
assert.NoError(t, err)
// Start rollermanager modules.
err = rollerManager.Start()
assert.NoError(t, err)
return rollerManager
}
// ClearDB clears db
func ClearDB(t *testing.T, db_cfg *database.DBConfig) {
factory, err := database.NewOrmFactory(db_cfg)
assert.NoError(t, err)
db := factory.GetDB()
version0 := int64(0)
err = migrate.Rollback(db.DB, &version0)
assert.NoError(t, err)
err = migrate.Migrate(db.DB)
assert.NoError(t, err)
err = db.DB.Close()
assert.NoError(t, err)
}
// PrepareDB will return DB for testcase
func PrepareDB(t *testing.T, db_cfg *database.DBConfig) database.OrmFactory {
db, err := database.NewOrmFactory(db_cfg)
assert.NoError(t, err)
return db
}
// SendTxToL2Client will send a default Tx by calling l2geth client
func SendTxToL2Client(t *testing.T, client *ethclient.Client, private string) *types.Transaction {
privateKey, err := crypto.HexToECDSA(private)
assert.NoError(t, err)
publicKey := privateKey.Public()
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
assert.True(t, ok)
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
assert.NoError(t, err)
value := big.NewInt(1000000000) // in wei
gasLimit := uint64(30000000) // in units
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
tx := types.NewTransaction(nonce, toAddress, value, gasLimit, gasPrice, nil)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), privateKey)
assert.NoError(t, err)
assert.NoError(t, client.SendTransaction(context.Background(), signedTx))
return signedTx
}


@@ -0,0 +1,187 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract MockBridgeL1 {
/*********************************
* Events from L1ScrollMessenger *
*********************************/
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/************************
* Events from ZKRollup *
************************/
/// @notice Emitted when a new batch is committed.
/// @param _batchHash The hash of the batch
/// @param _batchIndex The index of the batch
/// @param _parentHash The hash of parent batch
event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/// @notice Emitted when a batch is reverted.
/// @param _batchId The identification of the batch.
event RevertBatch(bytes32 indexed _batchId);
/// @notice Emitted when a batch is finalized.
/// @param _batchHash The hash of the batch
/// @param _batchIndex The index of the batch
/// @param _parentHash The hash of parent batch
event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/***********
* Structs *
***********/
struct L2MessageProof {
uint256 batchIndex;
uint256 blockHeight;
bytes merkleProof;
}
/// @dev The transaction struct
struct Layer2Transaction {
address caller;
uint64 nonce;
address target;
uint64 gas;
uint256 gasPrice;
uint256 value;
bytes data;
// signature
uint256 r;
uint256 s;
uint64 v;
}
/// @dev The block header struct
struct Layer2BlockHeader {
bytes32 blockHash;
bytes32 parentHash;
uint256 baseFee;
bytes32 stateRoot;
uint64 blockHeight;
uint64 gasUsed;
uint64 timestamp;
bytes extraData;
Layer2Transaction[] txs;
}
/// @dev The batch struct, the batch hash is always the last block hash of `blocks`.
struct Layer2Batch {
uint64 batchIndex;
// The hash of the last block in the parent batch
bytes32 parentHash;
Layer2BlockHeader[] blocks;
}
struct Layer2BatchStored {
bytes32 batchHash;
bytes32 parentHash;
uint64 batchIndex;
bool verified;
}
/*************
* Variables *
*************/
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
/// @notice Mapping from batch id to batch struct.
mapping(bytes32 => Layer2BatchStored) public batches;
/************************************
* Functions from L1ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
uint256 _nonce = messageNonce;
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce += 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message,
L2MessageProof memory
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
}
/***************************
* Functions from ZKRollup *
***************************/
function commitBatch(Layer2Batch memory _batch) external {
bytes32 _batchHash = _batch.blocks[_batch.blocks.length - 1].blockHash;
bytes32 _batchId = _computeBatchId(_batchHash, _batch.parentHash, _batch.batchIndex);
Layer2BatchStored storage _batchStored = batches[_batchId];
_batchStored.batchHash = _batchHash;
_batchStored.parentHash = _batch.parentHash;
_batchStored.batchIndex = _batch.batchIndex;
emit CommitBatch(_batchId, _batchHash, _batch.batchIndex, _batch.parentHash);
}
function revertBatch(bytes32 _batchId) external {
emit RevertBatch(_batchId);
}
function finalizeBatchWithProof(
bytes32 _batchId,
uint256[] memory,
uint256[] memory
) external {
Layer2BatchStored storage _batch = batches[_batchId];
uint256 _batchIndex = _batch.batchIndex;
emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
}
/// @dev Internal function to compute a unique batch id for mapping.
/// @param _batchHash The hash of the batch.
/// @param _parentHash The hash of the parent batch.
/// @param _batchIndex The index of the batch.
/// @return Return the computed batch id.
function _computeBatchId(
bytes32 _batchHash,
bytes32 _parentHash,
uint256 _batchIndex
) internal pure returns (bytes32) {
return keccak256(abi.encode(_batchHash, _parentHash, _batchIndex));
}
}


@@ -0,0 +1,67 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract MockBridgeL2 {
/*********************************
* Events from L2ScrollMessenger *
*********************************/
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/*************
* Variables *
*************/
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
/************************************
* Functions from L2ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _nonce = messageNonce;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce = _nonce + 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
}
}
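The mock contract computes the message hash as keccak256(abi.encodePacked(sender, target, value, fee, deadline, nonce, message)), and the watcher's utils.ComputeMessageHash is expected to agree with it so RelayedMessage events can be matched back to stored messages. The sketch below only mirrors the mock's encodePacked layout for illustration, assuming the scroll-tech fork keeps go-ethereum's common.LeftPadBytes and crypto.Keccak256Hash helpers; it is not the repository's implementation.
package main
import (
	"fmt"
	"math/big"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)
// packedMessageHash mirrors keccak256(abi.encodePacked(sender, target, value,
// fee, deadline, nonce, message)) as emitted by the mock contract: addresses
// are 20 raw bytes, uint256 values are 32-byte big-endian words, and `bytes`
// is appended as-is.
func packedMessageHash(sender, target common.Address, value, fee, deadline, nonce *big.Int, message []byte) common.Hash {
	var buf []byte
	buf = append(buf, sender.Bytes()...)
	buf = append(buf, target.Bytes()...)
	for _, v := range []*big.Int{value, fee, deadline, nonce} {
		buf = append(buf, common.LeftPadBytes(v.Bytes(), 32)...)
	}
	buf = append(buf, message...)
	return crypto.Keccak256Hash(buf)
}
func main() {
	h := packedMessageHash(
		common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63"),
		common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d"),
		big.NewInt(0), big.NewInt(0), big.NewInt(1700000000), big.NewInt(0),
		[]byte("testbridgecontract"),
	)
	fmt.Println(h.Hex())
}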


@@ -1,42 +0,0 @@
//SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract Mock_Bridge {
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
function sendMessage(
address _to,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
// @todo compute fee
uint256 _fee = 0;
uint256 _nonce = messageNonce;
require(msg.value >= _fee, "cannot pay fee");
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
unchecked {
messageNonce = _nonce + 1;
}
}
}


@@ -0,0 +1,169 @@
package sender
import (
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
)
type accountPool struct {
client *ethclient.Client
minBalance *big.Int
accounts map[common.Address]*bind.TransactOpts
accsCh chan *bind.TransactOpts
}
// newAccountPool creates an accountPool instance.
func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.Client, privs []*ecdsa.PrivateKey) (*accountPool, error) {
if minBalance == nil {
minBalance = big.NewInt(0)
minBalance.SetString("100000000000000000000", 10)
}
accs := &accountPool{
client: client,
minBalance: minBalance,
accounts: make(map[common.Address]*bind.TransactOpts, len(privs)),
accsCh: make(chan *bind.TransactOpts, len(privs)+2),
}
// get chainID from client
chainID, err := client.ChainID(ctx)
if err != nil {
return nil, err
}
for _, privStr := range privs {
auth, err := bind.NewKeyedTransactorWithChainID(privStr, chainID)
if err != nil {
log.Error("failed to create account", "chainID", chainID.String(), "err", err)
return nil, err
}
// Set pending nonce
nonce, err := client.PendingNonceAt(ctx, auth.From)
if err != nil {
return nil, err
}
auth.Nonce = big.NewInt(int64(nonce))
accs.accounts[auth.From] = auth
accs.accsCh <- auth
}
return accs, accs.checkAndSetBalances(ctx)
}
// getAccount gets an auth from the channel.
func (a *accountPool) getAccount() *bind.TransactOpts {
select {
case auth := <-a.accsCh:
return auth
default:
return nil
}
}
// releaseAccount puts a used auth back into the channel.
func (a *accountPool) releaseAccount(auth *bind.TransactOpts) {
a.accsCh <- auth
}
// resetNonce resets the nonce if sending the signed tx failed.
func (a *accountPool) resetNonce(ctx context.Context, auth *bind.TransactOpts) {
nonce, err := a.client.PendingNonceAt(ctx, auth.From)
if err != nil {
log.Warn("failed to reset nonce", "address", auth.From.String(), "err", err)
return
}
auth.Nonce = big.NewInt(int64(nonce))
}
// checkAndSetBalances checks balances and tops up any account below the minimum balance.
func (a *accountPool) checkAndSetBalances(ctx context.Context) error {
var (
root *bind.TransactOpts
maxBls = big.NewInt(0)
lostAuths []*bind.TransactOpts
)
for addr, auth := range a.accounts {
bls, err := a.client.BalanceAt(ctx, addr, nil)
if err != nil || bls.Cmp(a.minBalance) < 0 {
if err != nil {
log.Warn("failed to get balance", "address", addr.String(), "err", err)
return err
}
lostAuths = append(lostAuths, auth)
continue
} else if bls.Cmp(maxBls) > 0 { // Find the biggest balance account.
root, maxBls = auth, bls
}
}
if root == nil {
return fmt.Errorf("no account has enough balance")
}
if len(lostAuths) == 0 {
return nil
}
var (
tx *types.Transaction
err error
)
for _, auth := range lostAuths {
tx, err = a.createSignedTx(root, &auth.From, a.minBalance)
if err != nil {
return err
}
err = a.client.SendTransaction(ctx, tx)
if err != nil {
log.Error("Failed to send balance to account", "err", err)
return err
}
log.Debug("send balance to account", "account", auth.From.String(), "balance", a.minBalance.String())
}
// wait until mined
if _, err = bind.WaitMined(ctx, a.client, tx); err != nil {
return err
}
// Reset root's nonce.
a.resetNonce(ctx, root)
return nil
}
func (a *accountPool) createSignedTx(from *bind.TransactOpts, to *common.Address, value *big.Int) (*types.Transaction, error) {
gasPrice, err := a.client.SuggestGasPrice(context.Background())
if err != nil {
return nil, err
}
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := a.client.PendingNonceAt(context.Background(), from.From)
if err != nil {
return nil, err
}
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: to,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := from.Signer(from.From, tx)
if err != nil {
return nil, err
}
return signedTx, nil
}
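accountPool keeps its signers in a buffered channel used as a free list: getAccount does a non-blocking receive and yields nil when every signer is checked out (which SendTransaction surfaces as ErrNoAvailableAccount), and releaseAccount puts the signer back. A generic sketch of that idiom, purely for illustration:
package main
import "fmt"
// pool is a minimal sketch of the buffered-channel free-list idiom used by
// accountPool: acquire is non-blocking and reports false when everything is
// checked out; release returns the item for reuse.
type pool[T any] struct{ ch chan T }
func newPool[T any](items []T) *pool[T] {
	p := &pool[T]{ch: make(chan T, len(items))}
	for _, it := range items {
		p.ch <- it
	}
	return p
}
func (p *pool[T]) acquire() (T, bool) {
	select {
	case it := <-p.ch:
		return it, true
	default:
		var zero T
		return zero, false
	}
}
func (p *pool[T]) release(it T) { p.ch <- it }
func main() {
	p := newPool([]string{"signer-a", "signer-b"})
	if s, ok := p.acquire(); ok {
		fmt.Println("using", s)
		p.release(s)
	}
}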


@@ -3,14 +3,16 @@ package sender
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"math/big"
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
@@ -32,6 +34,11 @@ const (
LegacyTxType = "LegacyTx"
)
var (
// ErrNoAvailableAccount indicates no available account error in the account pool.
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
)
// DefaultSenderConfig The default config
var DefaultSenderConfig = config.SenderConfig{
Endpoint: "",
@@ -63,6 +70,7 @@ type PendingTransaction struct {
submitAt uint64
id string
feeData *FeeData
signer *bind.TransactOpts
tx *types.Transaction
}
@@ -73,20 +81,20 @@ type Sender struct {
chainID *big.Int // The chain id of the endpoint
ctx context.Context
mu sync.Mutex
auth *bind.TransactOpts
// account fields.
auths *accountPool
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs sync.Map // Mapping from nonce to pending transaction
confirmCh chan *Confirmation
sendTxErrCh chan error
stopCh chan struct{}
}
// NewSender returns a new instance of transaction sender
// txConfirmationCh is used to notify confirmed transaction
func NewSender(ctx context.Context, config *config.SenderConfig, prv *ecdsa.PrivateKey) (*Sender, error) {
func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.PrivateKey) (*Sender, error) {
if config == nil {
config = &DefaultSenderConfig
}
@@ -101,20 +109,10 @@ func NewSender(ctx context.Context, config *config.SenderConfig, prv *ecdsa.Priv
return nil, err
}
auth, err := bind.NewKeyedTransactorWithChainID(prv, chainID)
auths, err := newAccountPool(ctx, config.MinBalance, client, privs)
if err != nil {
log.Error("failed to create account", "chainID", chainID.String(), "err", err)
return nil, err
return nil, fmt.Errorf("failed to create account pool, err: %v", err)
}
log.Info("sender", "chainID", chainID.String(), "address", auth.From.String())
// set nonce
nonce, err := client.PendingNonceAt(ctx, auth.From)
if err != nil {
log.Error("failed to get pending nonce", "address", auth.From.String(), "err", err)
return nil, err
}
auth.Nonce = big.NewInt(int64(nonce))
// get header by number
header, err := client.HeaderByNumber(ctx, nil)
@@ -127,15 +125,15 @@ func NewSender(ctx context.Context, config *config.SenderConfig, prv *ecdsa.Priv
config: config,
client: client,
chainID: chainID,
auth: auth,
sendTxErrCh: make(chan error, 4),
auths: auths,
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: header.BaseFee.Uint64(),
pendingTxs: sync.Map{},
stopCh: make(chan struct{}),
}
go sender.loop()
go sender.loop(ctx)
return sender, nil
}
@@ -151,8 +149,14 @@ func (s *Sender) ConfirmChan() <-chan *Confirmation {
return s.confirmCh
}
func (s *Sender) getFeeData(msg ethereum.CallMsg) (*FeeData, error) {
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
// NumberOfAccounts returns the count of accounts.
func (s *Sender) NumberOfAccounts() int {
return len(s.auths.accounts)
}
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
// estimate gas limit
gasLimit, err := s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
}
@@ -186,35 +190,38 @@ func (s *Sender) getFeeData(msg ethereum.CallMsg) (*FeeData, error) {
// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte) (hash common.Hash, err error) {
if _, ok := s.pendingTxs.Load(ID); ok {
// We occupy the ID in case some other thread calls with the same ID at the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
var (
// estimate gas limit
call = ethereum.CallMsg{
From: s.auth.From,
To: target,
Gas: 0,
GasPrice: nil,
GasFeeCap: nil,
GasTipCap: nil,
Value: value,
Data: data,
AccessList: make(types.AccessList, 0),
// get an available account from the pool
auth := s.auths.getAccount()
if auth == nil {
s.pendingTxs.Delete(ID) // release the ID on failure
return common.Hash{}, ErrNoAvailableAccount
}
defer s.auths.releaseAccount(auth)
defer func() {
if err != nil {
s.pendingTxs.Delete(ID) // release the ID on failure
}
}()
var (
feeData *FeeData
tx *types.Transaction
)
// estimate gas fee
if feeData, err = s.getFeeData(call); err != nil {
if feeData, err = s.getFeeData(auth, target, value, data); err != nil {
return
}
if tx, err = s.createAndSendTx(feeData, target, value, data); err == nil {
if tx, err = s.createAndSendTx(auth, feeData, target, value, data, nil); err == nil {
// add pending transaction to queue
pending := &PendingTransaction{
tx: tx,
id: ID,
signer: auth,
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
@@ -225,14 +232,17 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
return
}
func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value *big.Int, data []byte) (tx *types.Transaction, err error) {
s.mu.Lock()
defer s.mu.Unlock()
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (tx *types.Transaction, err error) {
var (
nonce = s.auth.Nonce.Uint64()
nonce = auth.Nonce.Uint64()
txData types.TxData
)
// this is a resubmit call, override the nonce
if overrideNonce != nil {
nonce = *overrideNonce
}
// lock here to avoid blocking when calling `SuggestGasPrice`
switch s.config.TxType {
case LegacyTxType:
@@ -280,25 +290,29 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value
}
// sign and send
tx, err = s.auth.Signer(s.auth.From, types.NewTx(txData))
tx, err = auth.Signer(auth.From, types.NewTx(txData))
if err != nil {
log.Error("failed to sign tx", "err", err)
return
}
if err = s.client.SendTransaction(s.ctx, tx); err != nil {
log.Error("failed to send tx", "tx hash", tx.Hash().String(), "err", err)
s.sendTxErrCh <- err
// Check whether the error message contains "nonce", and reset the nonce;
// only reset the nonce when the call is not from a resubmit
if strings.Contains(err.Error(), "nonce") && overrideNonce == nil {
s.auths.resetNonce(context.Background(), auth)
}
return
}
// update nonce
s.auth.Nonce = big.NewInt(int64(nonce + 1))
// update nonce when it is not from resubmit
if overrideNonce == nil {
auth.Nonce = big.NewInt(int64(nonce + 1))
}
return
}
func (s *Sender) resubmitTransaction(feeData *FeeData, tx *types.Transaction) (*types.Transaction, error) {
// @todo move query out of lock scope
func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
@@ -334,7 +348,8 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, tx *types.Transaction) (*
feeData.gasTipCap = gasTipCap
}
return s.createAndSendTx(feeData, tx.To(), tx.Value(), tx.Data())
nonce := tx.Nonce()
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
}
// CheckPendingTransaction checks pending transactions, given the number of blocks to wait before confirmation.
@@ -343,6 +358,11 @@ func (s *Sender) CheckPendingTransaction(header *types.Header) {
atomic.StoreUint64(&s.blockNumber, number)
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
s.pendingTxs.Range(func(key, value interface{}) bool {
// ignore nil values, since a nil value is only stored to occupy the pending task ID
if value == nil || reflect.ValueOf(value).IsNil() {
return true
}
pending := value.(*PendingTransaction)
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
@@ -357,9 +377,43 @@ func (s *Sender) CheckPendingTransaction(header *types.Header) {
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
var tx *types.Transaction
tx, err := s.resubmitTransaction(pending.feeData, pending.tx)
tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx)
if err != nil {
log.Error("failed to resubmit transaction, reset submitAt", "tx hash", pending.tx.Hash().String(), "err", err)
// If the account pool is empty, it will try again in the next loop.
if !errors.Is(err, ErrNoAvailableAccount) {
log.Error("failed to resubmit transaction, reset submitAt", "tx hash", pending.tx.Hash().String(), "err", err)
}
// A nonce error here means one of the old replacement transactions was confirmed.
// One scenario is:
// 1. Initially, we replace the tx three times and submit it to the local node.
// We only keep the latest tx hash in memory.
// 2. Another node packed the 2nd or 3rd tx, and the local node has now received that block.
// 3. When we resubmit the 4th tx, we get a nonce error.
// 4. We need to check the status of the 3rd tx stored in our memory:
// 4.1 If the 3rd tx was packed, we get a receipt and mark it as confirmed.
// 4.2 If the 2nd tx was packed, the `TransactionReceipt` call returns nothing. Since we
// cannot do anything about it, we just log some information. In this case, the caller
// of `sender.SendTransaction` should write extra code to handle the situation.
// Another scenario is a private key leak, where someone sends a transaction with the same nonce.
// In that case we need to stop the program and handle the situation manually.
if strings.Contains(err.Error(), "nonce") {
// This key can be deleted
s.pendingTxs.Delete(key)
// Try to get the receipt of the latest replacement tx hash
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
// send confirm message
s.confirmCh <- &Confirmation{
ID: pending.id,
IsSuccessful: receipt.Status == types.ReceiptStatusSuccessful,
TxHash: pending.tx.Hash(),
}
} else {
// The receipt can be nil, since the confirmed transaction may not be the latest one.
// We just ignore it here; the caller of the sender pool should handle this situation.
log.Warn("Pending transaction is confirmed by one of the replaced transactions", "key", key, "signer", pending.signer.From, "nonce", pending.tx.Nonce())
}
}
} else {
// flush submitAt
pending.tx = tx
@@ -371,10 +425,13 @@ func (s *Sender) CheckPendingTransaction(header *types.Header) {
}
// Loop is the main event loop
func (s *Sender) loop() {
t := time.Duration(s.config.CheckPendingTime) * time.Second
checkTick := time.NewTicker(t)
func (s *Sender) loop(ctx context.Context) {
checkTick := time.NewTicker(time.Duration(s.config.CheckPendingTime) * time.Second)
defer checkTick.Stop()
checkBalanceTicker := time.NewTicker(time.Minute * 10)
defer checkBalanceTicker.Stop()
for {
select {
case <-checkTick.C:
@@ -384,17 +441,11 @@ func (s *Sender) loop() {
continue
}
s.CheckPendingTransaction(header)
case err := <-s.sendTxErrCh:
// redress nonce
if strings.Contains(err.Error(), "nonce") {
if nonce, err := s.client.PendingNonceAt(s.ctx, s.auth.From); err != nil {
log.Error("failed to get pending nonce", "address", s.auth.From.String(), "err", err)
} else {
s.mu.Lock()
s.auth.Nonce = big.NewInt(int64(nonce))
s.mu.Unlock()
}
}
case <-checkBalanceTicker.C:
// Check and set balance.
_ = s.auths.checkAndSetBalances(ctx)
case <-ctx.Done():
return
case <-s.stopCh:
return
}


@@ -3,6 +3,7 @@ package sender_test
import (
"context"
"crypto/ecdsa"
"errors"
"math/big"
"strconv"
"testing"
@@ -18,102 +19,107 @@ import (
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/sender"
)
const TX_BATCH = 100
const TXBatch = 50
var (
TestConfig = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8576,
},
}
l1gethImg docker.ImgInstance
private *ecdsa.PrivateKey
privateKeys []*ecdsa.PrivateKey
cfg *config.Config
l2gethImg docker.ImgInstance
)
func setupEnv(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
var err error
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
prv, err := crypto.HexToECDSA(cfg.L2Config.RelayerConfig.PrivateKey)
priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212")
assert.NoError(t, err)
private = prv
l1gethImg = mock.NewTestL1Docker(t, TestConfig)
// Load default private key.
privateKeys = []*ecdsa.PrivateKey{priv}
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
}
func TestFunction(t *testing.T) {
func TestSender(t *testing.T) {
// Setup
setupEnv(t)
t.Run("test Run sender", func(t *testing.T) {
// set config
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
// create newSender
newSender, err := sender.NewSender(context.Background(), cfg.L2Config.RelayerConfig.SenderConfig, private)
assert.NoError(t, err)
defer newSender.Stop()
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
assert.NoError(t, err)
// send transactions
idCache := cmap.New()
confirmCh := newSender.ConfirmChan()
var (
eg errgroup.Group
errCh chan error
)
go func() {
for i := 0; i < TX_BATCH; i++ {
//toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
toAddr := common.BigToAddress(big.NewInt(int64(i + 1000)))
id := strconv.Itoa(i + 1000)
eg.Go(func() error {
txHash, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
if err != nil {
t.Error("failed to send tx", "err", err)
return err
}
t.Log("successful send a tx", "ID", id, "tx hash", txHash.String())
idCache.Set(id, struct{}{})
return nil
})
}
errCh <- eg.Wait()
}()
// avoid the 10-minute default go test timeout panicking the testcase
after := time.After(60 * time.Second)
for {
select {
case cmsg := <-confirmCh:
t.Log("get confirmations of", "ID: ", cmsg.ID, "status: ", cmsg.IsSuccessful)
assert.Equal(t, true, cmsg.IsSuccessful)
_, exist := idCache.Pop(cmsg.ID)
assert.Equal(t, true, exist)
// Receive all confirmed txs.
if idCache.Count() == 0 {
return
}
case err := <-errCh:
if err != nil {
t.Errorf("failed to send tx, err: %v", err)
return
}
assert.NoError(t, err)
case <-after:
t.Logf("newSender test failed because timeout")
t.FailNow()
}
}
})
// Teardown
t.Cleanup(func() {
assert.NoError(t, l1gethImg.Stop())
assert.NoError(t, l2gethImg.Stop())
})
}
func testBatchSender(t *testing.T, batchSize int) {
for len(privateKeys) < batchSize {
priv, err := crypto.GenerateKey()
if err != nil {
t.Fatal(err)
}
privateKeys = append(privateKeys, priv)
}
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
senderCfg.Confirmations = 0
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
t.Fatal(err)
}
defer newSender.Stop()
// send transactions
var (
eg errgroup.Group
idCache = cmap.New()
confirmCh = newSender.ConfirmChan()
)
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
if errors.Is(err, sender.ErrNoAvailableAccount) {
<-time.After(time.Second)
continue
}
if err != nil {
return err
}
idCache.Set(id, struct{}{})
}
return nil
})
}
if err := eg.Wait(); err != nil {
t.Error(err)
}
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
// avoid 10 mins cause testcase panic
after := time.After(80 * time.Second)
for {
select {
case cmsg := <-confirmCh:
assert.Equal(t, true, cmsg.IsSuccessful)
_, exist := idCache.Pop(cmsg.ID)
assert.Equal(t, true, exist)
// Receive all confirmed txs.
if idCache.Count() == 0 {
return
}
case <-after:
t.Error("newSender test failed because of timeout")
return
}
}
}
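Editorial note: the following is a minimal caller-side sketch, not part of this diff, showing how the multi-account sender introduced above is intended to be driven. It assumes it lives in the same sender_test package as testBatchSender, reusing the package-level cfg and privateKeys and the imports already present there; the "example-id" string is a placeholder.
func exampleSendAndConfirm() error {
// Build the sender from the relayer config and the key list, as testBatchSender does.
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
s, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
if err != nil {
return err
}
defer s.Stop()
to := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for {
_, err = s.SendTransaction("example-id", &to, big.NewInt(1), nil)
if errors.Is(err, sender.ErrNoAvailableAccount) {
// Every signer currently has a pending tx; wait for one to be released.
time.Sleep(time.Second)
continue
}
if err != nil {
return err
}
break
}
// CheckPendingTransaction pushes a Confirmation once the tx has enough confirmations.
confirmation := <-s.ConfirmChan()
if !confirmation.IsSuccessful {
return errors.New("transaction reverted")
}
return nil
}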

bridge/tests/bridge_test.go (new file, 212 lines)

@@ -0,0 +1,212 @@
package tests
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/common/docker"
)
var (
// config
cfg *config.Config
// private key
privateKey *ecdsa.PrivateKey
// docker container handlers.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
redisImg docker.ImgInstance
// clients
l1Client *ethclient.Client
l2Client *ethclient.Client
// auth
l1Auth *bind.TransactOpts
l2Auth *bind.TransactOpts
// l1 messenger contract
l1MessengerInstance *mock_bridge.MockBridgeL1
l1MessengerAddress common.Address
// l1 rollup contract
l1RollupInstance *mock_bridge.MockBridgeL1
l1RollupAddress common.Address
// l2 messenger contract
l2MessengerInstance *mock_bridge.MockBridgeL2
l2MessengerAddress common.Address
)
func setupEnv(t *testing.T) {
var err error
privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
assert.NoError(t, err)
messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
assert.NoError(t, err)
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L1Config.Confirmations = 0
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.Confirmations = 0
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.Persistence.DriverName)
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
redisImg = docker.NewTestRedisDocker(t)
cfg.DBConfig.Redis.URL = redisImg.Endpoint()
// Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
assert.NoError(t, err)
l2Client, err = ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
// Create l1 and l2 auth
l1Auth = prepareAuth(t, l1Client, privateKey)
l2Auth = prepareAuth(t, l2Client, privateKey)
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
}
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
assert.NoError(t, err)
// 200 ether should be enough
value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
assert.Equal(t, ok, true)
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: &targetAddress,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = client.SendTransaction(context.Background(), signedTx)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, signedTx)
assert.NoError(t, err)
if receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
if redisImg != nil {
assert.NoError(t, redisImg.Stop())
}
}
func prepareContracts(t *testing.T) {
var err error
var tx *types.Transaction
// L1 messenger contract
_, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L1 rollup contract
_, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L2 messenger contract
_, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
assert.NoError(t, err)
l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
assert.NoError(t, err)
cfg.L1Config.L1MessengerAddress = l1MessengerAddress
cfg.L1Config.RollupContractAddress = l1RollupAddress
cfg.L1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress
cfg.L2Config.L2MessengerAddress = l2MessengerAddress
cfg.L2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
cfg.L2Config.RelayerConfig.RollupContractAddress = l1RollupAddress
}
func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID)
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
return auth
}
func TestFunction(t *testing.T) {
setupEnv(t)
// l1 rollup and watch rollup events
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
// l2 message
t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
t.Cleanup(func() {
free(t)
})
}


@@ -0,0 +1,177 @@
package tests
import (
"context"
"math/big"
"sync"
"testing"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testRelayL2MessageSucceed(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
var wg sync.WaitGroup
wg.Add(3)
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L2Watcher
l2Watcher, err := l2.NewL2WatcherClient(context.Background(), l2Client, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
assert.NoError(t, err)
sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// l2 watch process events
l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks
traces := []*types.BlockTrace{
{
Header: &types.Header{
Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &types.StorageTrace{},
},
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add fake batch
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676)
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[0].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
// process l2 messages
l2Relayer.ProcessSavedEvents(&wg)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgSubmitted)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgConfirmed)
}

bridge/tests/rollup_test.go (new file, 139 lines)

@@ -0,0 +1,139 @@
package tests
import (
"context"
"math/big"
"sync"
"testing"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// add some blocks to db
var traces []*types.BlockTrace
var parentHash common.Hash
for i := 1; i <= 10; i++ {
header := types.Header{
Number: big.NewInt(int64(i)),
ParentHash: parentHash,
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
}
traces = append(traces, &types.BlockTrace{
Header: &header,
StorageTrace: &types.StorageTrace{},
})
parentHash = header.Hash()
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add one batch to db
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[1].Header.Number.Uint64(),
Hash: traces[1].Header.Hash().String(),
ParentHash: traces[1].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[1].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
wg.Wait()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
wg.Add(1)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
wg.Wait()
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
}

bridge/utils/utils.go (new file, 69 lines)

@@ -0,0 +1,69 @@
package utils
import (
"bytes"
"math/big"
"github.com/iden3/go-iden3-crypto/keccak256"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
)
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values
func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(keccak256.Hash(append(a.Bytes()[:], b.Bytes()[:]...)))
}
func encodePacked(input ...[]byte) []byte {
return bytes.Join(input, nil)
}
// ComputeMessageHash computes the message hash
func ComputeMessageHash(
sender common.Address,
target common.Address,
value *big.Int,
fee *big.Int,
deadline *big.Int,
message []byte,
messageNonce *big.Int,
) common.Hash {
packed := encodePacked(
sender.Bytes(),
target.Bytes(),
math.U256Bytes(value),
math.U256Bytes(fee),
math.U256Bytes(deadline),
math.U256Bytes(messageNonce),
message,
)
return common.BytesToHash(keccak256.Hash(packed))
}
// BufferToUint256Be converts a byte array to a uint256 array, assuming big-endian encoding
func BufferToUint256Be(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
buffer256[i] = big.NewInt(0)
for j := 0; j < 32; j++ {
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
}
}
return buffer256
}
// BufferToUint256Le converts a byte array to a uint256 array, assuming little-endian encoding
func BufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
v := big.NewInt(0)
shft := big.NewInt(1)
for j := 0; j < 32; j++ {
v = new(big.Int).Add(v, new(big.Int).Mul(shft, big.NewInt(int64(buffer[i*32+j]))))
shft = new(big.Int).Mul(shft, big.NewInt(256))
}
buffer256[i] = v
}
return buffer256
}
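Editorial note: a small usage sketch, not part of this diff, to make the byte-order difference between the two helpers concrete. It would sit in the same utils package and needs only the math/big import already present; exampleBufferToUint256 is a hypothetical helper name.
// For a 32-byte buffer whose last byte is 0x01, the big-endian reading is 1,
// while the little-endian reading is 2^248.
func exampleBufferToUint256() (be, le []*big.Int) {
buf := make([]byte, 32)
buf[31] = 0x01
be = BufferToUint256Be(buf) // be[0] == 1
le = BufferToUint256Le(buf) // le[0] == 1 << 248
return be, le
}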


@@ -0,0 +1,41 @@
package utils_test
import (
"math/big"
"testing"
"scroll-tech/bridge/utils"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
hash := utils.Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
if hash != common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5", hash.Hex())
}
hash = utils.Keccak2(common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"))
if hash != common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", hash.Hex())
}
hash = utils.Keccak2(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if hash != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid keccak, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", hash.Hex())
}
}
func TestComputeMessageHash(t *testing.T) {
hash := utils.ComputeMessageHash(
common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
big.NewInt(5000000000000000),
big.NewInt(0),
big.NewInt(1674204924),
common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
big.NewInt(30706),
)
assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
}


@@ -179,6 +179,13 @@ linters:
- depguard
- gocyclo
- unparam
- exportloopref
- sqlclosecheck
- rowserrcheck
- durationcheck
- bidichk
- typecheck
- unused
enable-all: false
disable:
@@ -200,16 +207,7 @@ issues:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
# Exclude known linters from partially hard-vendored code,
# which is impossible to exclude via "nolint" comments.
- path: internal/hmac/
text: "weak cryptographic primitive"
linters:
- gosec
# Exclude some staticcheck messages
@@ -217,18 +215,6 @@ issues:
- staticcheck
text: "SA9003:"
- linters:
- golint
text: "package comment should be of the form"
- linters:
- golint
text: "don't use ALL_CAPS in Go names;"
- linters:
- golint
text: "don't use underscores in Go names;"
# Exclude lll issues for long lines with go:generate
- linters:
- lll


@@ -1,13 +1,26 @@
# Build bridge in a stock Go builder container
FROM scrolltech/go-builder:1.18 as builder
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
COPY ./ /
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
RUN cd /bridge/cmd && go build -v -p 4 -o bridge
# Build bridge
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd && go build -v -p 4 -o /bin/bridge
# Pull bridge into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bridge/cmd/bridge /bin/
COPY --from=builder /bin/bridge /bin/
ENTRYPOINT ["bridge"]


@@ -1,7 +1,5 @@
assets/
coordinator/
docs/
integration-test/
l2geth/
roller/
rpc-gateway/
*target/*


@@ -1,13 +1,43 @@
# Build scroll in a stock Go builder container
FROM scrolltech/go-builder:1.18 as builder
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as chef
WORKDIR app
COPY ./ /
FROM chef as planner
COPY ./common/libzkp/impl/ .
RUN cargo chef prepare --recipe-path recipe.json
RUN cd /coordinator/cmd && go build -v -p 4 -o coordinator
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
# Pull scroll into a second stage deploy alpine container
FROM alpine:latest
COPY ./common/libzkp/impl .
RUN cargo build --release
COPY --from=builder /coordinator/cmd/coordinator /bin/
ENTRYPOINT ["coordinator"]
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.a ./coordinator/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
COPY --from=builder /bin/coordinator /bin/
ENTRYPOINT ["/bin/coordinator"]


@@ -1,8 +1,6 @@
assets/
bridge/
contracts/
docs/
integration-test/
l2geth/
roller/
rpc-gateway/
*target/*


@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.18 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build db_cli
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/db_cli /bin/
ENTRYPOINT ["db_cli"]


@@ -0,0 +1,6 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*


@@ -2,11 +2,15 @@ GO_VERSION := 1.18
PYTHON_VERSION := 3.10
RUST_VERSION := nightly-2022-08-23
.PHONY: all go-builder rust-builder rust-alpine-builder go-rust-builder py-runner
.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
go-builder:
docker build -t scrolltech/go-builder:latest -f go-builder.Dockerfile ./
docker image tag scrolltech/go-builder:latest scrolltech/go-builder:$(GO_VERSION)
go-rust-builder:
docker build -t scrolltech/go-rust-builder:latest -f go-rust-builder.Dockerfile ./
docker image tag scrolltech/go-rust-builder:latest scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
go-alpine-builder:
docker build -t scrolltech/go-alpine-builder:latest -f go-alpine-builder.Dockerfile ./
docker image tag scrolltech/go-alpine-builder:latest scrolltech/go-alpine-builder:$(GO_VERSION)
rust-builder:
docker build -t scrolltech/rust-builder:latest -f rust-builder.Dockerfile ./
@@ -16,23 +20,25 @@ rust-alpine-builder:
docker build -t scrolltech/rust-alpine-builder:latest -f rust-alpine-builder.Dockerfile ./
docker image tag scrolltech/rust-alpine-builder:latest scrolltech/rust-alpine-builder:$(RUST_VERSION)
go-rust-builder:
docker build -t scrolltech/go-rust-builder:latest -f go-rust-builder.Dockerfile ./
docker image tag scrolltech/go-rust-builder:latest scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
go-rust-alpine-builder:
docker build -t scrolltech/go-rust-alpine-builder:latest -f go-rust-alpine-builder.Dockerfile ./
docker image tag scrolltech/go-rust-alpine-builder:latest scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
py-runner:
docker build -t scrolltech/py-runner:latest -f py-runner.Dockerfile ./
docker image tag scrolltech/py-runner:latest scrolltech/py-runner:$(PYTHON_VERSION)
all: go-builder rust-builder rust-alpine-builder go-rust-builder py-runner
all: go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
publish:
docker push scrolltech/go-builder:latest
docker push scrolltech/go-builder:$(GO_VERSION)
docker push scrolltech/go-alpine-builder:latest
docker push scrolltech/go-alpine-builder:$(GO_VERSION)
docker push scrolltech/rust-builder:latest
docker push scrolltech/rust-builder:$(RUST_VERSION)
docker push scrolltech/rust-alpine-builder:latest
docker push scrolltech/rust-alpine-builder:$(RUST_VERSION)
docker push scrolltech/go-rust-alpine-builder:latest
docker push scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
docker push scrolltech/go-rust-builder:latest
docker push scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
docker push scrolltech/py-runner:latest


@@ -0,0 +1,35 @@
FROM golang:1.18-alpine
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
# RUN apk add --no-cache libc6-compat
# RUN apk add --no-cache gcompat
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH \
CARGO_NET_GIT_FETCH_WITH_CLI=true
RUN set -eux; \
apkArch="$(apk --print-arch)"; \
case "$apkArch" in \
x86_64) rustArch='x86_64-unknown-linux-musl' ;; \
aarch64) rustArch='aarch64-unknown-linux-musl' ;; \
*) echo >&2 "unsupported architecture: $apkArch"; exit 1 ;; \
esac; \
\
url="https://static.rust-lang.org/rustup/dist/${rustArch}/rustup-init"; \
wget "$url"; \
chmod +x rustup-init;
RUN ./rustup-init -y --no-modify-path --default-toolchain ${DEFAULT_RUST_TOOLCHAIN}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/


@@ -1,31 +1,34 @@
FROM golang:1.18-alpine
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
FROM ubuntu:20.04
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
RUN apt-get update && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH
# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y
RUN set -eux; \
apkArch="$(apk --print-arch)"; \
case "$apkArch" in \
x86_64) rustArch='x86_64-unknown-linux-musl' ;; \
aarch64) rustArch='aarch64-unknown-linux-musl' ;; \
*) echo >&2 "unsupported architecture: $apkArch"; exit 1 ;; \
esac; \
\
url="https://static.rust-lang.org/rustup/dist/${rustArch}/rustup-init"; \
wget "$url"; \
chmod +x rustup-init;
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y
RUN ./rustup-init -y --no-modify-path --default-toolchain ${DEFAULT_RUST_TOOLCHAIN}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;
# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo
# Add Toolchain
RUN rustup toolchain install nightly-2022-08-23
# TODO: make this ARG
ENV CARGO_CHEF_TAG=0.1.41
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/
# Install Go
RUN rm -rf /usr/local/go
# for 1.17
# RUN wget https://go.dev/dl/go1.17.13.linux-amd64.tar.gz
# RUN tar -C /usr/local -xzf go1.17.13.linux-amd64.tar.gz
# for 1.18
RUN wget https://go.dev/dl/go1.18.9.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go1.18.9.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"


@@ -6,11 +6,13 @@ ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
RUN apk add --no-cache \
ca-certificates \
gcc \
git \
musl-dev
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH
PATH=/usr/local/cargo/bin:$PATH \
CARGO_NET_GIT_FETCH_WITH_CLI=true
RUN set -eux; \
apkArch="$(apk --print-arch)"; \


@@ -3,7 +3,7 @@ FROM ubuntu:20.04
RUN apt-get update && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime
# Install basic packages
RUN apt-get install build-essential curl git pkg-config -y
RUN apt-get install build-essential curl wget git pkg-config -y
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y


@@ -0,0 +1,14 @@
#!/bin/bash
set -uex
${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
npx cobertura-merge -o cobertura.xml \
package1=coverage.bridge.xml \
package2=coverage.db.xml \
package3=coverage.common.xml \
package4=coverage.coordinator.xml \
package5=coverage.integration.xml


@@ -0,0 +1,63 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
// LOG_DOCKER = 'true'
}
stages {
stage('Tag') {
steps {
script {
TAGNAME = sh(returnStdout: true, script: 'git tag -l --points-at HEAD')
sh "echo ${TAGNAME}"
// ...
}
}
}
stage('Build') {
environment {
// Extract the username and password of our credentials into "DOCKER_CREDENTIALS_USR" and "DOCKER_CREDENTIALS_PSW".
// (NOTE 1: DOCKER_CREDENTIALS will be set to "your_username:your_password".)
// The new variables will always be YOUR_VARIABLE_NAME + _USR and _PSW.
// (NOTE 2: You can't print credentials in the pipeline for security reasons.)
DOCKER_CREDENTIALS = credentials('dockerhub')
}
steps {
withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
// Use a scripted pipeline.
script {
stage('Push image') {
if (TAGNAME == ""){
return;
}
sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "make -C coordinator docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
}
}
}
}
}
}
post {
always {
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${TAGNAME} Tag build ${currentBuild.result}")
}
}
}

common/.gitignore (vendored, 2 lines changed)

@@ -1,2 +1,4 @@
/build/bin
.idea
libzkp/impl/target
libzkp/interface/*.a


@@ -5,3 +5,4 @@ test:
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings

common/cmd/cmd.go (new file, 89 lines)

@@ -0,0 +1,89 @@
package cmd
import (
"os"
"os/exec"
"strings"
"sync"
"testing"
cmap "github.com/orcaman/concurrent-map"
)
var verbose bool
func init() {
v := os.Getenv("LOG_DOCKER")
if v == "true" || v == "TRUE" {
verbose = true
}
}
type checkFunc func(buf string)
// Cmd struct
type Cmd struct {
*testing.T
name string
args []string
mu sync.Mutex
cmd *exec.Cmd
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
//stdout bytes.Buffer
Err error
}
// NewCmd creates a new Cmd instance.
func NewCmd(t *testing.T, name string, args ...string) *Cmd {
return &Cmd{
T: t,
checkFuncs: cmap.New(),
name: name,
args: args,
}
}
// RegistFunc registers a check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Set(key, check)
}
// UnRegistFunc unregisters a check func
func (t *Cmd) UnRegistFunc(key string) {
t.checkFuncs.Pop(key)
}
func (t *Cmd) runCmd() {
cmd := exec.Command(t.args[0], t.args[1:]...) //nolint:gosec
cmd.Stdout = t
cmd.Stderr = t
_ = cmd.Run()
}
// RunCmd runs the command; it runs in a goroutine when parallel is true.
func (t *Cmd) RunCmd(parallel bool) {
t.Log("cmd: ", t.args)
if parallel {
go t.runCmd()
} else {
t.runCmd()
}
}
func (t *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
t.Logf("%s: %v", t.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
t.Logf("%s: %v", t.name, out)
}
go t.checkFuncs.IterCb(func(_ string, value interface{}) {
check := value.(checkFunc)
check(out)
})
return len(data), nil
}

common/cmd/cmd_app.go (new file, 114 lines)

@@ -0,0 +1,114 @@
package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/stretchr/testify/assert"
)
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(waitResult func() bool) {
t.Log("cmd: ", append([]string{t.name}, t.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{t.name}, t.args...),
Stderr: t,
Stdout: t,
}
if waitResult != nil {
go func() {
_ = cmd.Run()
}()
waitResult()
} else {
_ = cmd.Run()
}
t.mu.Lock()
t.cmd = cmd
t.mu.Unlock()
}
// WaitExit waits until the process exits.
func (t *Cmd) WaitExit() {
// Wait until all the check funcs have finished or the test has failed.
for !(t.Failed() || t.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
}
// Send interrupt signal.
t.mu.Lock()
_ = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// Interrupt sends an interrupt signal.
func (t *Cmd) Interrupt() {
t.mu.Lock()
t.Err = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// WaitResult returns true if the keyword appears in the output before the timeout.
func (t *Cmd) WaitResult(timeout time.Duration, keyword string) bool {
if keyword == "" {
return false
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return true
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
return false
}
// ExpectWithTimeout waits for the keyword to appear within the timeout.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
if keyword == "" {
return
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
waitResult := func() {
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
}
if parallel {
go waitResult()
} else {
waitResult()
}
}
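Editorial note: a sketch, not part of this diff, of how a test can combine RunApp and WaitResult. It assumes the same cmd package context shown above; the "bridge-test" binary name, the "--log.debug" flag and the log keyword are hypothetical placeholders for whatever is registered via reexec in the real test binary.
func runAppExample(t *testing.T) {
// "bridge-test" must match a name registered with reexec in the test binary (placeholder).
app := NewCmd(t, "bridge-test", "--log.debug")
// RunApp starts the re-exec'ed binary in a goroutine and blocks until the wait
// function returns, i.e. until the keyword shows up or the timeout fires.
app.RunApp(func() bool { return app.WaitResult(20*time.Second, "Start bridge successfully") })
// WaitExit waits for outstanding check funcs and then interrupts the process.
defer app.WaitExit()
// ... assertions against the running app would go here ...
}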

common/cmd/cmd_test.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package cmd_test
import (
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"scroll-tech/common/cmd"
)
func TestCmd(t *testing.T) {
app := cmd.NewCmd(t, "curTime", "date", "+%Y-%m-%d")
tm := time.Now()
curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())
okCh := make(chan struct{}, 1)
app.RegistFunc(curTime, func(buf string) {
if strings.Contains(buf, curTime) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer app.UnRegistFunc(curTime)
// Run cmd.
app.RunCmd(true)
// Wait result.
select {
case <-okCh:
return
case <-time.After(time.Second):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", curTime))
}
}


@@ -1,84 +0,0 @@
package docker
import (
"errors"
"os/exec"
"strings"
"sync"
"testing"
)
type checkFunc func(buf string)
// Cmd struct
type Cmd struct {
*testing.T
checkFuncs sync.Map //map[string]checkFunc
//stdout bytes.Buffer
errMsg chan error
}
// NewCmd create Cmd instance.
func NewCmd(t *testing.T) *Cmd {
cmd := &Cmd{
T: t,
//stdout: bytes.Buffer{},
errMsg: make(chan error, 2),
}
cmd.RegistFunc("panic", func(buf string) {
if strings.Contains(buf, "panic") {
cmd.errMsg <- errors.New(buf)
}
})
return cmd
}
// RegistFunc register check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Store(key, check)
}
// UnRegistFunc unregister check func
func (t *Cmd) UnRegistFunc(key string) {
if _, ok := t.checkFuncs.Load(key); ok {
t.checkFuncs.Delete(key)
}
}
// RunCmd parallel running when parallel is true.
func (t *Cmd) RunCmd(args []string, parallel bool) {
t.Log("RunCmd cmd", args)
if parallel {
go t.runCmd(args)
} else {
t.runCmd(args)
}
}
// ErrMsg return error output channel
func (t *Cmd) ErrMsg() <-chan error {
return t.errMsg
}
func (t *Cmd) Write(data []byte) (int, error) {
out := string(data)
t.Logf(out)
go func(content string) {
t.checkFuncs.Range(func(key, value interface{}) bool {
check := value.(checkFunc)
check(content)
return true
})
}(out)
return len(data), nil
}
func (t *Cmd) runCmd(args []string) {
cmd := exec.Command(args[0], args[1:]...) //nolint:gosec
cmd.Stdout = t
cmd.Stderr = t
_ = cmd.Run()
}


@@ -8,24 +8,11 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"scroll-tech/common/docker"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
var (
cli *client.Client
)
func init() {
var err error
cli, err = client.NewClientWithOpts(client.FromEnv)
if err != nil {
panic(err)
}
cli.NegotiateAPIVersion(context.Background())
}
// ImgDB the postgres image manager.
type ImgDB struct {
image string
@@ -37,28 +24,29 @@ type ImgDB struct {
password string
running bool
*docker.Cmd
cmd *cmd.Cmd
}
// NewImgDB returns a postgres db img instance.
func NewImgDB(t *testing.T, image, password, dbName string, port int) docker.ImgInstance {
return &ImgDB{
func NewImgDB(t *testing.T, image, password, dbName string, port int) ImgInstance {
img := &ImgDB{
image: image,
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
password: password,
dbName: dbName,
port: port,
Cmd: docker.NewCmd(t),
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start postgres db container.
func (i *ImgDB) Start() error {
id := docker.GetContainerID(i.name)
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.Cmd.RunCmd(i.prepare(), true)
i.cmd.RunCmd(true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -75,14 +63,13 @@ func (i *ImgDB) Stop() error {
i.running = false
ctx := context.Background()
// check if container is running, stop the running container.
id := docker.GetContainerID(i.name)
if id != "" {
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
return err
}
i.id = id
// stop the running container.
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
return err
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
@@ -110,7 +97,7 @@ func (i *ImgDB) prepare() []string {
func (i *ImgDB) isOk() bool {
keyword := "database system is ready to accept connections"
okCh := make(chan struct{}, 1)
i.RegistFunc(keyword, func(buf string) {
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
@@ -119,14 +106,16 @@ func (i *ImgDB) isOk() bool {
}
}
})
defer i.UnRegistFunc(keyword)
defer i.cmd.UnRegistFunc(keyword)
select {
case <-okCh:
time.Sleep(time.Millisecond * 1500)
i.id = docker.GetContainerID(i.name)
utils.TryTimes(3, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
return i.id != ""
case <-time.NewTimer(time.Second * 10).C:
case <-time.After(time.Second * 20):
return false
}
}


@@ -9,22 +9,11 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)
var (
cli *client.Client
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
func init() {
var err error
cli, err = client.NewClientWithOpts(client.FromEnv)
if err != nil {
panic(err)
}
cli.NegotiateAPIVersion(context.Background())
}
// ImgGeth is the geth image manager; it covers both l1geth and l2geth.
type ImgGeth struct {
image string
@@ -37,20 +26,21 @@ type ImgGeth struct {
wsPort int
running bool
*Cmd
cmd *cmd.Cmd
}
// NewImgGeth returns a geth img instance.
func NewImgGeth(t *testing.T, image, volume, ipc string, hPort, wPort int) ImgInstance {
return &ImgGeth{
img := &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Unix()),
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
volume: volume,
ipcPath: ipc,
httpPort: hPort,
wsPort: wPort,
Cmd: NewCmd(t),
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start run image and check if it is running healthily.
@@ -59,7 +49,7 @@ func (i *ImgGeth) Start() error {
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.Cmd.RunCmd(i.prepare(), true)
i.cmd.RunCmd(true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -86,7 +76,7 @@ func (i *ImgGeth) Endpoint() string {
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
i.RegistFunc(keyword, func(buf string) {
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
@@ -95,13 +85,16 @@ func (i *ImgGeth) isOk() bool {
}
}
})
defer i.UnRegistFunc(keyword)
defer i.cmd.UnRegistFunc(keyword)
select {
case <-okCh:
i.id = GetContainerID(i.name)
utils.TryTimes(3, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
return i.id != ""
case <-time.NewTimer(time.Second * 10).C:
case <-time.After(time.Second * 10):
return false
}
}


@@ -0,0 +1,120 @@
package docker
import (
"context"
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgRedis the redis image.
type ImgRedis struct {
image string
name string
id string
port int
running bool
cmd *cmd.Cmd
}
// NewImgRedis returns a redis img instance.
func NewImgRedis(t *testing.T, image string, port int) ImgInstance {
img := &ImgRedis{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
port: port,
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start run image and check if it is running healthily.
func (r *ImgRedis) Start() error {
id := GetContainerID(r.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", r.name)
}
// Add check status function.
keyword := "Ready to accept connections"
okCh := make(chan struct{}, 1)
r.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer r.cmd.UnRegistFunc(keyword)
// Start redis.
r.cmd.RunCmd(true)
// Wait result of keyword.
select {
case <-okCh:
utils.TryTimes(3, func() bool {
r.id = GetContainerID(r.name)
return r.id != ""
})
case <-time.After(time.Second * 10):
}
// Set redis status.
r.running = r.id != ""
if !r.running {
_ = r.Stop()
return fmt.Errorf("failed to start image: %s", r.image)
}
return nil
}
// Stop the docker container.
func (r *ImgRedis) Stop() error {
if !r.running {
return nil
}
r.running = false
ctx := context.Background()
id := GetContainerID(r.name)
if id != "" {
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
return err
}
r.id = id
}
// remove the stopped container.
return cli.ContainerRemove(ctx, r.id, types.ContainerRemoveOptions{})
}
// Endpoint return the connection endpoint.
func (r *ImgRedis) Endpoint() string {
if !r.running {
return ""
}
port := 6379
if r.port != 0 {
port = r.port
}
return fmt.Sprintf("redis://default:redistest@localhost:%d/0", port)
}
// docker run --name redis-xxx -p randomport:6379 --requirepass "redistest" redis
func (r *ImgRedis) prepare() []string {
cmds := []string{"docker", "run", "--name", r.name}
var ports []string
if r.port != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(r.port) + ":6379"}...)
}
return append(append(cmds, ports...), r.image)
}
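Editorial note: a sketch, not part of this diff, of how the redis endpoint above can be consumed. It assumes the go-redis/v8 client that common/docker/mock.go imports below is also imported here; pingRedis is a hypothetical helper name.
func pingRedis(ctx context.Context, img ImgInstance) error {
// Endpoint returns a URL like redis://default:redistest@localhost:<port>/0.
opt, err := redis.ParseURL(img.Endpoint())
if err != nil {
return err
}
rdb := redis.NewClient(opt)
defer rdb.Close()
return rdb.Ping(ctx).Err()
}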


@@ -4,16 +4,26 @@ import (
"context"
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
)
func TestL1Geth(t *testing.T) {
func TestDocker(t *testing.T) {
t.Parallel()
t.Run("testL1Geth", testL1Geth)
t.Run("testL2Geth", testL2Geth)
t.Run("testDB", testDB)
t.Run("testRedis", testRedis)
}
func testL1Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
img := NewImgGeth(t, "scroll_l1geth", "", "", 8535, 0)
assert.NoError(t, img.Start())
img := NewTestL1Docker(t)
defer img.Stop()
client, err := ethclient.Dial(img.Endpoint())
@@ -24,12 +34,11 @@ func TestL1Geth(t *testing.T) {
t.Logf("chainId: %s", chainID.String())
}
func TestL2Geth(t *testing.T) {
func testL2Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
img := NewImgGeth(t, "scroll_l2geth", "", "", 8535, 0)
assert.NoError(t, img.Start())
img := NewTestL2Docker(t)
defer img.Stop()
client, err := ethclient.Dial(img.Endpoint())
@@ -39,3 +48,18 @@ func TestL2Geth(t *testing.T) {
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}
func testDB(t *testing.T) {
driverName := "postgres"
dbImg := NewTestDBDocker(t, driverName)
defer dbImg.Stop()
db, err := sqlx.Open(driverName, dbImg.Endpoint())
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}
func testRedis(t *testing.T) {
redisImg := NewTestRedisDocker(t)
defer redisImg.Stop()
}

View File

@@ -5,8 +5,22 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
)
var (
cli *client.Client
)
func init() {
var err error
cli, err = client.NewClientWithOpts(client.FromEnv)
if err != nil {
panic(err)
}
cli.NegotiateAPIVersion(context.Background())
}
// ImgInstance is an img instance.
type ImgInstance interface {
Start() error
@@ -19,10 +33,12 @@ func GetContainerID(name string) string {
filter := filters.NewArgs()
filter.Add("name", name)
lst, _ := cli.ContainerList(context.Background(), types.ContainerListOptions{
Latest: true,
Limit: 1,
Filters: filter,
})
if len(lst) > 0 {
return lst[0].Names[0]
return lst[0].ID
}
return ""
}
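
For illustration only, the shared client and GetContainerID above are enough to build a small cleanup helper such as the one sketched below; removeByName is a hypothetical name, not something defined in this diff:

package docker

import (
	"context"
	"time"

	"github.com/docker/docker/api/types"
)

// removeByName stops and removes a leftover container by name, if any.
func removeByName(name string) error {
	id := GetContainerID(name)
	if id == "" {
		return nil // nothing to clean up
	}
	ctx := context.Background()
	timeout := 3 * time.Second
	if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
		return err
	}
	return cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{})
}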

View File

@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:prealpha-v2.2
FROM scrolltech/l2geth:prealpha-v4.2
RUN mkdir -p /l2geth/keystore

common/docker/mock.go Normal file
View File

@@ -0,0 +1,101 @@
package docker
import (
"context"
"crypto/rand"
"math/big"
"testing"
"github.com/go-redis/redis/v8"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/utils"
)
var (
l1StartPort = 10000
l2StartPort = 20000
dbStartPort = 30000
rsStartPort = 40000
)
// NewTestL1Docker starts and returns l1geth docker
func NewTestL1Docker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL1geth := NewImgGeth(t, "scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
assert.NoError(t, imgL1geth.Start())
// Try up to 3 times to get the chainID until it succeeds.
utils.TryTimes(3, func() bool {
client, _ := ethclient.Dial(imgL1geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL1geth
}
// NewTestL2Docker starts and returns l2geth docker
func NewTestL2Docker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL2geth := NewImgGeth(t, "scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
assert.NoError(t, imgL2geth.Start())
// Try up to 3 times to get the chainID until it succeeds.
utils.TryTimes(3, func() bool {
client, _ := ethclient.Dial(imgL2geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL2geth
}
// NewTestDBDocker starts and returns database docker
func NewTestDBDocker(t *testing.T, driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgDB := NewImgDB(t, driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
assert.NoError(t, imgDB.Start())
// try 5 times until the db is ready.
utils.TryTimes(5, func() bool {
db, _ := sqlx.Open(driverName, imgDB.Endpoint())
if db != nil {
return db.Ping() == nil
}
return false
})
return imgDB
}
// NewTestRedisDocker starts and returns a redis docker instance.
func NewTestRedisDocker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgRedis := NewImgRedis(t, "redis", rsStartPort+int(id.Int64()))
assert.NoError(t, imgRedis.Start())
op, err := redis.ParseURL(imgRedis.Endpoint())
assert.NoError(t, err)
if t.Failed() {
return nil
}
rdb := redis.NewClient(op)
utils.TryTimes(3, func() bool {
err = rdb.Ping(context.Background()).Err()
return err == nil
})
return imgRedis
}

View File

@@ -3,53 +3,62 @@ module scroll-tech/common
go 1.18
require (
github.com/docker/docker v20.10.17+incompatible
github.com/docker/docker v20.10.21+incompatible
github.com/go-redis/redis/v8 v8.11.5
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.6
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.14
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d
github.com/stretchr/testify v1.8.0
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e
github.com/stretchr/testify v1.8.1
github.com/urfave/cli/v2 v2.10.2
)
require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/go-ethereum v1.10.13 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/huin/goupnp v1.0.2 // indirect
github.com/iden3/go-iden3-crypto v0.0.12 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.13 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/gomega v1.25.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
@@ -58,7 +67,10 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.3.1 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
@@ -66,14 +78,16 @@ require (
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.5.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.1.12 // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

View File

@@ -77,8 +77,9 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -87,6 +88,8 @@ github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3h
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -96,20 +99,23 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -121,8 +127,9 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13 h1:DEYFP9zk+Gruf3ae1JOJVhNmxK28ee+sMELPLgYTXpA=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@@ -150,8 +157,12 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -192,8 +203,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -207,10 +218,12 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -222,12 +235,14 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/iden3/go-iden3-crypto v0.0.12 h1:dXZF+R9iI07DK49LHX/EKC3jTa0O2z+TUyvxjGK7V38=
github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/iden3/go-iden3-crypto v0.0.13 h1:ixWRiaqDULNyIDdOWz2QQJG5t4PpNHkQk2P6GV94cok=
github.com/iden3/go-iden3-crypto v0.0.13/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8=
@@ -243,12 +258,15 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -272,8 +290,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -284,6 +303,9 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -304,6 +326,9 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -321,20 +346,21 @@ github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -343,12 +369,15 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -372,11 +401,19 @@ github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d h1:eh1i1M9BKPCQckNFQsV/yfazQ895hevkWr8GuqhKNrk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d/go.mod h1:SkQ1431r0LkrExTELsapw6JQHYpki8O1mSsObTSmBWU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e h1:TAqAeQiQI6b+TRyqyQ6qhizqY35LhqYe8lWhG0nNRGw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -398,6 +435,7 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -405,8 +443,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -419,11 +458,15 @@ github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hM
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
@@ -446,8 +489,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -477,8 +520,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -505,8 +548,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E=
golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -521,8 +564,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -569,8 +613,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -581,8 +625,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -618,12 +663,11 @@ golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=

View File

@@ -301,7 +301,7 @@ checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#bd9b8eec349bbff14c3d6fd0152b0cb6334b4701"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#123e4bbd23f196a71037daf40fae66b7ca24a02e"
dependencies = [
"eth-types",
"ethers-core",
@@ -797,7 +797,7 @@ dependencies = [
"crypto-bigint",
"der",
"digest 0.10.3",
"ff",
"ff 0.12.1",
"generic-array 0.14.6",
"group",
"pkcs8",
@@ -875,7 +875,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#bd9b8eec349bbff14c3d6fd0152b0cb6334b4701"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#123e4bbd23f196a71037daf40fae66b7ca24a02e"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1027,7 +1027,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#bd9b8eec349bbff14c3d6fd0152b0cb6334b4701"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#123e4bbd23f196a71037daf40fae66b7ca24a02e"
dependencies = [
"eth-types",
"geth-utils",
@@ -1066,6 +1066,17 @@ dependencies = [
"syn",
]
[[package]]
name = "ff"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924"
dependencies = [
"bitvec 0.22.3",
"rand_core",
"subtle",
]
[[package]]
name = "ff"
version = "0.12.1"
@@ -1233,7 +1244,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#bd9b8eec349bbff14c3d6fd0152b0cb6334b4701"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#123e4bbd23f196a71037daf40fae66b7ca24a02e"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1273,7 +1284,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#bd9b8eec349bbff14c3d6fd0152b0cb6334b4701"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#123e4bbd23f196a71037daf40fae66b7ca24a02e"
dependencies = [
"gobuild",
]
@@ -1330,7 +1341,7 @@ version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7"
dependencies = [
"ff",
"ff 0.12.1",
"rand_core",
"subtle",
]
@@ -1369,10 +1380,11 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0902#dfaea2c6a55249abf68db60f963e44eb7c1646d6"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0920-fix#d6bd0f291d41c4585e783d2d94a77fd80e1ba47e"
dependencies = [
"bitvec 0.22.3",
"ff 0.11.1",
"halo2_proofs",
"hash-circuit",
"hex",
"lazy_static",
"num-bigint",
@@ -1439,11 +1451,11 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6f18f38e82d302cd8b6ce8809b59c32350b019a3"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6b8c8a07da9fbf5d5878831e35360b8c5e6d89a3"
dependencies = [
"blake2b_simd",
"cfg-if 0.1.10",
"ff",
"ff 0.12.1",
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.2.1)",
"log",
@@ -1461,7 +1473,7 @@ name = "halo2curves"
version = "0.2.1"
source = "git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.2.1#f75ed26c961179186e9cec02cc3f841ca9e3fec1"
dependencies = [
"ff",
"ff 0.12.1",
"group",
"lazy_static",
"num-bigint",
@@ -1478,7 +1490,7 @@ name = "halo2curves"
version = "0.2.1"
source = "git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0#83c72d49762343ffc9576ca11a2aa615efe1029b"
dependencies = [
"ff",
"ff 0.12.1",
"group",
"lazy_static",
"num-bigint",
@@ -1502,17 +1514,6 @@ dependencies = [
"num-traits",
]
[[package]]
name = "hash-circuit"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/hash-circuit.git#4d5b291948276bcf0c8636f7722d108a6f3a914f"
dependencies = [
"bitvec 0.22.3",
"halo2_proofs",
"lazy_static",
"thiserror",
]
[[package]]
name = "hashbrown"
version = "0.12.3"
@@ -1857,7 +1858,7 @@ checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838"
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#bd9b8eec349bbff14c3d6fd0152b0cb6334b4701"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#123e4bbd23f196a71037daf40fae66b7ca24a02e"
dependencies = [
"eth-types",
"halo2_proofs",
@@ -2011,7 +2012,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#bd9b8eec349bbff14c3d6fd0152b0cb6334b4701"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#123e4bbd23f196a71037daf40fae66b7ca24a02e"
dependencies = [
"eth-types",
"ethers-core",
@@ -2223,7 +2224,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5cc65faf8e7313b4b1fbaa9f7ca917a0eed499a9663be71477f87993604341d8"
dependencies = [
"blake2b_simd",
"ff",
"ff 0.12.1",
"group",
"lazy_static",
"rand",
@@ -2396,7 +2397,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/appliedzkp/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/appliedzkp/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -2406,7 +2407,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -2491,20 +2492,6 @@ dependencies = [
"rustix",
]
[[package]]
name = "prover"
version = "0.1.0"
dependencies = [
"libc",
"log",
"once_cell",
"serde",
"serde_derive",
"serde_json",
"types",
"zkevm",
]
[[package]]
name = "quote"
version = "1.0.21"
@@ -3402,7 +3389,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#bad0b6f6da869d40178569c4658c58985c5629fa"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
"base64 0.13.0",
"blake2",
@@ -3829,7 +3816,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zkevm"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#bad0b6f6da869d40178569c4658c58985c5629fa"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
"anyhow",
"blake2",
@@ -3865,7 +3852,7 @@ dependencies = [
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#bd9b8eec349bbff14c3d6fd0152b0cb6334b4701"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=scroll-dev-0920#123e4bbd23f196a71037daf40fae66b7ca24a02e"
dependencies = [
"array-init",
"bus-mapping",
@@ -3894,3 +3881,18 @@ dependencies = [
"strum_macros",
"subtle",
]
[[package]]
name = "zkp"
version = "0.1.0"
dependencies = [
"env_logger",
"libc",
"log",
"once_cell",
"serde",
"serde_derive",
"serde_json",
"types",
"zkevm",
]

View File

@@ -1,17 +1,18 @@
[package]
name = "prover"
name = "zkp"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["staticlib", "cdylib"]
crate-type = ["staticlib"]
[dependencies]
zkevm = { git = "https://github.com/scroll-tech/common-rs"}
types = { git = "https://github.com/scroll-tech/common-rs"}
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
log = "0.4"
env_logger = "0.9.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
@@ -25,4 +26,4 @@ debug-assertions = true
[profile.release]
opt-level = 3
debug-assertions = true
debug-assertions = true

View File

@@ -1,6 +1,7 @@
#![feature(once_cell)]
pub mod prove;
pub mod verify;
pub(crate) mod utils {
use std::ffi::{CStr, CString};

View File

@@ -1,7 +1,7 @@
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char};
use libc::c_char;
use std::cell::OnceCell;
use types::eth::BlockResult;
use types::eth::BlockTrace;
use zkevm::circuit::AGG_DEGREE;
use zkevm::utils::{load_or_create_params, load_or_create_seed};
use zkevm::{circuit::DEGREE, prover::Prover};
@@ -11,6 +11,8 @@ static mut PROVER: OnceCell<Prover> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *const c_char) {
env_logger::init();
let params_path = c_char_to_str(params_path);
let seed_path = c_char_to_str(seed_path);
let params = load_or_create_params(params_path, *DEGREE).unwrap();
@@ -24,7 +26,7 @@ pub unsafe extern "C" fn init_prover(params_path: *const c_char, seed_path: *con
#[no_mangle]
pub unsafe extern "C" fn create_agg_proof(trace_char: *const c_char) -> *const c_char {
let trace_vec = c_char_to_vec(trace_char);
let trace = serde_json::from_slice::<BlockResult>(&trace_vec).unwrap();
let trace = serde_json::from_slice::<BlockTrace>(&trace_vec).unwrap();
let proof = PROVER
.get_mut()
.unwrap()
@@ -33,3 +35,17 @@ pub unsafe extern "C" fn create_agg_proof(trace_char: *const c_char) -> *const c
let proof_bytes = serde_json::to_vec(&proof).unwrap();
vec_to_c_char(proof_bytes)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn create_agg_proof_multi(trace_char: *const c_char) -> *const c_char {
let trace_vec = c_char_to_vec(trace_char);
let traces = serde_json::from_slice::<Vec<BlockTrace>>(&trace_vec).unwrap();
let proof = PROVER
.get_mut()
.unwrap()
.create_agg_circuit_proof_multi(traces.as_slice())
.unwrap();
let proof_bytes = serde_json::to_vec(&proof).unwrap();
vec_to_c_char(proof_bytes)
}

View File

@@ -0,0 +1,40 @@
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use std::fs::File;
use std::io::Read;
use zkevm::circuit::{AGG_DEGREE, DEGREE};
use zkevm::prover::AggCircuitProof;
use zkevm::utils::load_or_create_params;
use zkevm::verifier::Verifier;
static mut VERIFIER: Option<&Verifier> = None;
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_verifier(params_path: *const c_char, agg_vk_path: *const c_char) {
env_logger::init();
let params_path = c_char_to_str(params_path);
let agg_vk_path = c_char_to_str(agg_vk_path);
let mut f = File::open(agg_vk_path).unwrap();
let mut agg_vk = vec![];
f.read_to_end(&mut agg_vk).unwrap();
let params = load_or_create_params(params_path, *DEGREE).unwrap();
let agg_params = load_or_create_params(params_path, *AGG_DEGREE).unwrap();
let v = Box::new(Verifier::from_params(params, agg_params, Some(agg_vk)));
VERIFIER = Some(Box::leak(v))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_agg_proof(proof: *const c_char) -> c_char {
let proof_vec = c_char_to_vec(proof);
let agg_proof = serde_json::from_slice::<AggCircuitProof>(proof_vec.as_slice()).unwrap();
let verified = VERIFIER
.unwrap()
.verify_agg_circuit_proof(agg_proof)
.is_ok();
verified as c_char
}

View File

@@ -0,0 +1,5 @@
void init_prover(char *params_path, char *seed_path);
char* create_agg_proof(char *trace);
char* create_agg_proof_multi(char *trace);
void init_verifier(char *params_path, char *agg_vk_path);
char verify_agg_proof(char *proof);
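
This header exists so the Go side can call into the static library through cgo. A minimal binding sketch is below; the package name, the link flags, and the handling of the returned C strings are assumptions rather than the repository's actual binding code:

package prover

/*
#cgo LDFLAGS: -lzkp -lm -ldl
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// InitProver initializes the Rust prover from the params and seed files.
func InitProver(paramsPath, seedPath string) {
	cParams := C.CString(paramsPath)
	cSeed := C.CString(seedPath)
	defer C.free(unsafe.Pointer(cParams))
	defer C.free(unsafe.Pointer(cSeed))
	C.init_prover(cParams, cSeed)
}

// CreateAggProof passes a JSON-encoded block trace across the FFI boundary
// and returns the JSON-encoded aggregated proof.
func CreateAggProof(traceJSON []byte) []byte {
	cTrace := C.CString(string(traceJSON))
	defer C.free(unsafe.Pointer(cTrace))
	return []byte(C.GoString(C.create_agg_proof(cTrace)))
}

// VerifyAggProof reports whether the Rust verifier accepts the proof.
func VerifyAggProof(proofJSON []byte) bool {
	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))
	return C.verify_agg_proof(cProof) != 0
}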

View File

@@ -2,56 +2,32 @@ package message
import (
"crypto/ecdsa"
"encoding/binary"
"encoding/json"
"crypto/rand"
"encoding/hex"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"golang.org/x/crypto/blake2s"
"github.com/scroll-tech/go-ethereum/rlp"
)
// MsgType denotes the type of message being sent or received.
type MsgType uint16
const (
// Error message.
Error MsgType = iota
// Register message, sent by a roller when a connection is established.
Register
// BlockTrace message, sent by a sequencer to a roller to notify them
// they need to generate a proof.
BlockTrace
// Proof message, sent by a roller to a sequencer when they have finished
// proof generation of a given set of block traces.
Proof
)
// RespStatus is the status of the proof generation
// RespStatus represents the status code sent from the roller to Scroll
type RespStatus uint32
const (
// StatusOk indicates the proof generation succeeded
// StatusOk means proof generation succeeded
StatusOk RespStatus = iota
// StatusProofError indicates the proof generation failed
// StatusProofError means proof generation failed
StatusProofError
)
// Msg is the top-level message container which contains the payload and the
// message identifier.
type Msg struct {
// Message type
Type MsgType `json:"type"`
// Message payload
Payload []byte `json:"payload"`
}
// AuthMessage is the first message exchanged from the Roller to the Sequencer.
// AuthMsg is the first message exchanged from the Roller to the Sequencer.
// It effectively acts as a registration, and makes the Roller identification
// known to the Sequencer.
type AuthMessage struct {
type AuthMsg struct {
// Message fields
Identity Identity `json:"message"`
Identity *Identity `json:"message"`
// Roller signature
Signature string `json:"signature"`
}
@@ -60,14 +36,28 @@ type AuthMessage struct {
type Identity struct {
// Roller name
Name string `json:"name"`
// Time of message creation
Timestamp int64 `json:"timestamp"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Roller public key
PublicKey string `json:"publicKey"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion:
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
Version string `json:"version"`
// Random unique token generated by manager
Token string `json:"token"`
}
// GenerateToken generates a random token
func GenerateToken() (string, error) {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
return "", err
}
return hex.EncodeToString(b), nil
}
// Sign signs the auth message
func (a *AuthMessage) Sign(priv *ecdsa.PrivateKey) error {
func (a *AuthMsg) Sign(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
@@ -78,43 +68,150 @@ func (a *AuthMessage) Sign(priv *ecdsa.PrivateKey) error {
if err != nil {
return err
}
a.Signature = common.Bytes2Hex(sig)
a.Signature = hexutil.Encode(sig)
return nil
}
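As a usage sketch (not part of this diff), a roller could sign an AuthMsg and wrap it in the generic Msg envelope with the Register type before sending it to the coordinator. The helper name is hypothetical; the ecdsa, json, and time imports of the message package are assumed.

// buildRegisterMsg signs the AuthMsg and wraps it in a Register-typed Msg.
func buildRegisterMsg(priv *ecdsa.PrivateKey, name, token string) ([]byte, error) {
	authMsg := &AuthMsg{Identity: &Identity{
		Name:      name,
		Timestamp: uint32(time.Now().Unix()),
		Token:     token,
	}}
	if err := authMsg.Sign(priv); err != nil {
		return nil, err
	}
	payload, err := json.Marshal(authMsg)
	if err != nil {
		return nil, err
	}
	return json.Marshal(&Msg{Type: Register, Payload: payload})
}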
// Verify verifies the auth message signature.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
// recover public key
if a.Identity.PublicKey == "" {
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
}
return crypto.VerifySignature(common.FromHex(a.Identity.PublicKey), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature
func (a *AuthMsg) PublicKey() (string, error) {
if a.Identity.PublicKey == "" {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
a.Identity.PublicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
return a.Identity.PublicKey, nil
}
return a.Identity.PublicKey, nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
bs, err := json.Marshal(i)
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`
// Roller signature
Signature string `json:"signature"`
// Roller public key
publicKey string
}
// Sign signs the ProofMsg.
func (a *ProofMsg) Sign(priv *ecdsa.PrivateKey) error {
hash, err := a.ProofDetail.Hash()
if err != nil {
return err
}
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies ProofMsg.Signature.
func (a *ProofMsg) Verify() (bool, error) {
hash, err := a.ProofDetail.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
// recover public key
if a.publicKey == "" {
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
}
return crypto.VerifySignature(common.FromHex(a.publicKey), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature
func (a *ProofMsg) PublicKey() (string, error) {
if a.publicKey == "" {
hash, err := a.ProofDetail.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
return a.publicKey, nil
}
return a.publicKey, nil
}
// TaskMsg is a wrapper type around the db ProveTask type.
type TaskMsg struct {
ID string `json:"id"`
Traces []*types.BlockTrace `json:"blockTraces"`
}
// ProofDetail is the message received from rollers that contains the zk proof, the status of
// proof generation, and an error message if proof generation failed.
type ProofDetail struct {
ID string `json:"id"`
Status RespStatus `json:"status"`
Proof *AggProof `json:"proof"`
Error string `json:"error,omitempty"`
}
// Hash returns the hash of the ProofDetail content.
func (z *ProofDetail) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(z)
if err != nil {
return nil, err
}
hash := blake2s.Sum256(bs)
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
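Similarly, a hypothetical helper (not part of this diff) showing how a roller could wrap a finished AggProof for a task in a signed ProofMsg, using only the types and Sign method defined above:

// buildProofMsg packages the proof for a task into a signed ProofMsg.
func buildProofMsg(priv *ecdsa.PrivateKey, taskID string, proof *AggProof) (*ProofMsg, error) {
	msg := &ProofMsg{ProofDetail: &ProofDetail{
		ID:     taskID,
		Status: StatusOk,
		Proof:  proof,
	}}
	if err := msg.Sign(priv); err != nil {
		return nil, err
	}
	return msg, nil
}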
// BlockTraces is a wrapper type around types.BlockResult which adds an ID
// that identifies which proof generation session these block traces are
// associated to. This then allows the roller to add the ID back to their
// proof message once generated, and in turn helps the sequencer understand
// where to handle the proof.
type BlockTraces struct {
ID uint64 `json:"id"`
Traces *types.BlockResult `json:"blockTraces"`
}
// ProofMsg is the message received from rollers that contains the zk proof, the status of
// proof generation, and an error message if proof generation failed.
type ProofMsg struct {
Status RespStatus `json:"status"`
Error string `json:"error,omitempty"`
ID uint64 `json:"id"`
// FIXME: Maybe we need to allow Proof omitempty? Also in scroll.
Proof *AggProof `json:"proof"`
}
// AggProof includes the proof and the public input that are required for verification and rollup.
type AggProof struct {
Proof []byte `json:"proof"`
@@ -122,14 +219,3 @@ type AggProof struct {
FinalPair []byte `json:"final_pair"`
Vk []byte `json:"vk"`
}
// Marshal marshals the AggProof as length-prefixed JSON bytes
func (proof *AggProof) Marshal() ([]byte, error) {
jsonByt, err := json.Marshal(proof)
if err != nil {
return nil, err
}
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(len(jsonByt)))
return append(buf, jsonByt...), nil
}
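For reference, a minimal sketch of the reader that would match the 4-byte big-endian length prefix produced by the removed Marshal above; the function name is hypothetical, and the io, encoding/binary, and encoding/json imports are assumed.

// readLengthPrefixedProof reads a 4-byte big-endian length followed by
// that many bytes of JSON and decodes them into an AggProof.
func readLengthPrefixedProof(r io.Reader) (*AggProof, error) {
	var lenBuf [4]byte
	if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
		return nil, err
	}
	body := make([]byte, binary.BigEndian.Uint32(lenBuf[:]))
	if _, err := io.ReadFull(r, body); err != nil {
		return nil, err
	}
	proof := &AggProof{}
	if err := json.Unmarshal(body, proof); err != nil {
		return nil, err
	}
	return proof, nil
}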


@@ -0,0 +1,33 @@
package message
import (
"testing"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
func TestAuthMessageSignAndVerify(t *testing.T) {
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
authMsg := &AuthMsg{
Identity: &Identity{
Name: "testRoller",
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
ok, err := authMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
// Check public key is ok.
pub, err := authMsg.PublicKey()
assert.NoError(t, err)
pubkey := crypto.CompressPubkey(&privkey.PublicKey)
assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}

File diff suppressed for 4 files because one or more lines are too long

common/testdata/blockTrace_02.json (new vendored file, 544 lines)

@@ -0,0 +1,544 @@
{
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"header": {
"parentHash": "0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"transactionsRoot": "0x3057754c197f33e1fe799e996db6232b5257412feea05b3c1754738f0b33fe32",
"receiptsRoot": "0xd95b673818fa493deec414e01e610d97ee287c9421c8eff4102b1647c1a184e4",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x2",
"gasLimit": "0x355418d1e8184",
"gasUsed": "0xa410",
"timestamp": "0x63807b2a",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000004b54a94f0df14333e63c8a13dfe6097c1a08b5fd2c225a8dc0f199dae245aead55d6f774a980a0c925be407748d56a14106afda7ddc1dec342e7ee3b0d58a8df01",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"transactions": [
{
"type": 0,
"nonce": 0,
"txHash": "0xb2febc1213baec968f6575789108e175273b8da8f412468098893084229f1542",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514",
"s": "0x34cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b"
},
{
"type": 0,
"nonce": 1,
"txHash": "0xe6ac2ffc543d07f1e280912a2abe3aa659bf83773740681151297ada1bb211dd",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xf039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316",
"s": "0x5a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1"
}
],
"storageTrace": {
"rootBefore": "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
"rootAfter": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"proofs": {
"0x01bae6BF68E9A03Fb2bc0615b1bf0d69ce9411eD": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0xc0c4C8bAEA3f6Acb49b6E1fb9e2ADEcEeaCB0cA2": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": [
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
},
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
}
],
"mptwitness": [
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
},
{
"pathPart": "0x3",
"root": "0xaf16fd780a8c7616b95b20da69f4ff26e0253238e996f9516445d6d6bf92b725",
"path": [
{
"value": "0x5bbe97e7e66485b203f9dfea64eb7fa7df06959b12cbde2beba14f8f91133a13",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
},
{
"value": "0x2e591357b02ab3117c35ad94a4e1a724fdbd95d6463da1f6c8017e6d000ecf02",
"sibling": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
{
"value": "0x794953bb5d8aa00f90383ff435ce2ea58e30e1da1061e69455c38496766ec10f",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
}
]
}
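The fixture above is consumed by Go tests; a minimal sketch of a test helper that loads it is shown below, assuming the JSON decodes directly into the go-ethereum types.BlockTrace used by TaskMsg (the helper name and call site are hypothetical).

package message

import (
	"encoding/json"
	"os"
	"testing"

	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/stretchr/testify/assert"
)

// loadBlockTrace reads a trace fixture such as
// common/testdata/blockTrace_02.json and decodes it into a BlockTrace.
func loadBlockTrace(t *testing.T, path string) *types.BlockTrace {
	data, err := os.ReadFile(path)
	assert.NoError(t, err)
	trace := &types.BlockTrace{}
	assert.NoError(t, json.Unmarshal(data, trace))
	return trace
}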

Some files were not shown because too many files have changed in this diff.