Compare commits

...

72 Commits

Author SHA1 Message Date
ChuhanJin
7fce3c30c6 feat(bridge-history-api): maintain withdraw_root in workflow and db (#694)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-08-02 12:51:40 +08:00
ChuhanJin
c5590e9434 refactor(bridge-history-api): server switch to gin framework (#672)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-02 00:05:48 +08:00
Steven
62f7069278 feat: add batch and chunk provers and verifiers (#689)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-08-01 22:45:01 +08:00
HAOYUatHZ
1e9c8fddf6 build(go): upgrade go to 1.19 (#693) 2023-08-01 10:32:45 +08:00
colin
eb9070e1ae feat(relayer): estimate l1 batch commit gas (#659)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-07-31 14:03:38 +02:00
Steven
df54d518cf feat: add ChunkProof message (#688)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-07-30 16:24:36 +08:00
HAOYUatHZ
20e13445f8 refactor: rename roller to prover (#684) 2023-07-28 22:59:27 +08:00
ChuhanJin
e780994146 fix(bridge-history-api): fix error not return 0 (#685) 2023-07-28 22:18:44 +08:00
ChuhanJin
f9a0de0f16 feat(bridge-history-api): switch to v2 decoding (#683)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-28 13:55:55 +02:00
colin
3eb62880fe feat(orm): add more info for agg proving (#678) 2023-07-28 19:27:15 +08:00
Haichen Shen
e2612a3d88 fix(contracts): Fix typos in the contract function name and docs (#679) 2023-07-27 09:58:07 -07:00
Péter Garamvölgyi
530db9e2e1 fix(build): update verifier lib path (#682) 2023-07-27 13:48:14 +02:00
Péter Garamvölgyi
c12b1fd8f2 fix(contracts): adjust deployment scripts (#680) 2023-07-27 12:40:12 +02:00
Péter Garamvölgyi
bab1982c30 fix(build): include prover-stats-api in dockerfiles (#681) 2023-07-27 12:34:10 +02:00
Xi Lin
b0ee9fa519 refactor(contracts): OZ-L1-N01 Constant Not Using UPPER_CASE Format (#674) 2023-07-26 23:05:56 -07:00
Péter Garamvölgyi
d8cc69501e fix(contracts): initialize L2GasOracle with correct intrinsic gas params. (#677) 2023-07-26 10:00:39 +02:00
Haichen Shen
2eb458cf42 fix(contracts): OZ-L1-N10 SimpleGasOracle Is Not Used (#671) 2023-07-25 10:25:27 -07:00
Xi Lin
3832422bc9 fix(contracts): OZ-L2-L06 Initialization Performed Outside of Initialization Function (#652)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-25 10:24:53 -07:00
Xi Lin
4d96c12e7b fix(contracts): OZ-L1-N02 Error-Prone Call Encoding and OZ-L2-L03 Unsafe ABI Encoding (#668)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-24 23:05:54 +08:00
Xi Lin
f56997bf15 fix(contracts): OZ-L2-L07 Block Container Does Not Enforce Whitelist (#651)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-24 22:57:52 +08:00
Xi Lin
2bd9694348 fix(contracts): OZ-L2-L05 Lack of Event Emissions (#650)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-24 22:43:43 +08:00
Xi Lin
8b6c237d74 fix(contracts): OZ-L2-L01 Lack of Validation When Updating Maximum Failed Execution Tries (#649)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-24 22:29:05 +08:00
Xi Lin
0fc6d2a5e5 fix(contracts): OZ-L2-M02 WETH9 Approval Can Be Front-Run (#632)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-24 22:16:12 +08:00
Xi Lin
0ce3b182a8 fix(contracts): OZ-L2-M01 L2MessageQueue Stores Incorrect Value if not Initialized Before Appending (#630)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-24 22:04:55 +08:00
ChuhanJin
5c4f7c33fd refactor(bridge-history-api): use Gorm (#656)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2023-07-24 17:35:43 +08:00
Xi Lin
e8c66e4597 fix(contracts): fix getL1GasUsed in L1GasPriceOracle (#665)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-23 04:11:23 -07:00
HAOYUatHZ
de1d9b98ec bump version (#666) 2023-07-22 15:37:13 +08:00
georgehao
58e07a7481 refactor(coordinator): update coordinator orm and layout (#521)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2023-07-22 09:24:58 +08:00
colin
3880cdc1af refactor(scroll): update dependencies and format goimports in prover-stats-api module (#662) 2023-07-21 15:03:32 +08:00
Xi Lin
94c557bfed fix(contracts): remove retry with proof in L2ScrollMessenger (#658) 2023-07-21 14:19:01 +08:00
Xi Lin
523cc2cb7e fix(contracts): OZ-L1-L02 Initialization Not Disabled for Implementation Contracts (#639)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 21:14:35 +08:00
Xi Lin
af6d81255c fix(contracts): OZ-L1-M01 Enforced Transactions Signed Off-Chain Are Likely to Fail (#620)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 21:06:12 +08:00
Xi Lin
b5bbb72756 fix(contracts): OZ-L1-L10 Unpinned Compiler Version (#636)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 20:57:43 +08:00
Lawliet-Chan
92c52817e6 fix(prover-stats-api): fix readme (#661)
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
2023-07-20 18:06:59 +08:00
colin
076cfc1eae refactor(relayer): optimize l1 block schema (#648)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 17:26:43 +08:00
Xi Lin
25e4b6c19d fix(contracts): OZ-L1-L04 Lost Funds in Messenger Contracts (#637)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 17:08:02 +08:00
Péter Garamvölgyi
bae38de0d3 fix(contracts): OZ-L1-L05 Outdated OpenZeppelin Library Version (#622)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: zimpha <zimpha@gmail.com>
2023-07-20 16:54:43 +08:00
Xi Lin
c9f623b12b fix(contracts): OZ-L1-L01 Batch Reverting Can Pause Finalization (#634)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 16:36:37 +08:00
Xi Lin
3491f0ebff fix(contracts): OZ-L1-L08 Batch Events Lack Information (#624)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 16:24:27 +08:00
Xi Lin
ff4a9e1dd2 fix(contracts): OZ-L1-L07 Lack of Logs on Sensitive Actions (#623)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 16:10:06 +08:00
Xi Lin
f8ec59f7e1 fix(contracts): OZ-L1-M02 Lack of Upgradeability Storage Gaps (#618)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 15:52:48 +08:00
Xi Lin
571a577231 doc(contracts)/fix(contracts): OZ-L1-M03 WithdrawTrieVerifier Proves Intermediate Nodes (#619)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 15:42:11 +08:00
Lawliet-Chan
e3b451c641 feat(prover-stats-api): add prover stats API (#635)
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 15:33:29 +08:00
Xi Lin
2d6a195d52 fix(contracts): OZ-L1-H03 Incorrect Depth Calculation for Extension Nodes Allows Denial-of-Service (#617)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-20 14:48:09 +08:00
ChuhanJin
577cc90a34 refactor(bridge-history-api): fix go lint (#638)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-18 18:35:45 +08:00
Ahmed Castro
ecd3a61a86 refactor(contracts): add token interfaces extensions (#654) 2023-07-18 15:58:10 +08:00
ChuhanJin
b4cb30e2a1 feat(bridge-history-api): add watch nft batch events watching (#643)
Co-authored-by: vincent <419436363@qq.com>
2023-07-17 21:10:03 +08:00
colin
d9b8891803 refactor(chunk & batch proposer): optimize logging (#653) 2023-07-17 15:28:25 +08:00
ChuhanJin
6db2c0a8cb fix(bridge-history-api): crossMsgs can be empty (#646)
Co-authored-by: vincent <419436363@qq.com>
2023-07-14 17:39:37 +08:00
ChuhanJin
0caf0d4052 fix(bridge-history-api): fix claimable api sql issue (#645)
Co-authored-by: vincent <419436363@qq.com>
2023-07-14 15:26:31 +08:00
vyzo
95f2f7da0f fix(bridge): adjust gas fee cap in resumbitTransaction for rising basefee (#625)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-07-13 17:29:17 +08:00
Xi Lin
d2a1459768 fix(contracts): fix dropping message with nonce 0 (#640)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-13 16:48:45 +08:00
colin
f38dda8e02 feat(relayer): remove 'skipped' proving/rollup status and related code/tests (#642) 2023-07-13 16:39:02 +08:00
HAOYUatHZ
189ef09938 ci(github): only trigger build when merging (#641) 2023-07-13 09:07:26 +08:00
Péter Garamvölgyi
b79832566c feat(coordinator): Remove timestamp from roller protocol (#236) 2023-07-11 20:35:28 -07:00
Xi Lin
6841ef264c feat(contracts): add refund for skipped messages (#561)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-11 09:16:37 -07:00
ChuhanJin
425f74e763 feat(bridge-history-api): add new api to fetch all claimable txs under one address (#607)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-07-11 23:10:06 +08:00
georgehao
d59b2b4c41 refactor(coordinator): refactor task_prover's reward to decimal (#633)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2023-07-11 19:25:57 +08:00
Xi Lin
2323dd0daa fix(contracts): OZ-L1-H05 Users Can Lose Refund by Default (#605)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-10 10:02:58 -07:00
Péter Garamvölgyi
207d13c453 refactor(db): revise types in DB migrations (#631) 2023-07-10 14:39:25 +02:00
Péter Garamvölgyi
357173848a refactor(orm): set status fields explicitly during DB operations (#628)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-07-10 10:57:33 +02:00
Xi Lin
535ec91141 fix(contracts): OZ-L1-H07 L2 Standard ERC-20 Token Metadata Can Be Set Arbitrarily (#606)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-10 15:31:11 +08:00
Péter Garamvölgyi
8a0b526391 refactor(orm): Make ORM usage consistent (#627) 2023-07-10 09:18:08 +02:00
Steven
1b62c064ad fix(libzkp): replace tag with rev for halo2curves in libzkp (#629)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-07-10 12:41:27 +08:00
HAOYUatHZ
81ae4d3e7b refactor(libzkp): suppress compilation warnings (#575) 2023-07-10 12:28:51 +08:00
georgehao
a83e035845 feat(gorm): adapt the gorm logger with geth logger (#609)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-07-10 12:09:46 +08:00
Xi Lin
96452ee32b feat(contracts): add a simple usdc gateway (#587)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-07-08 11:39:37 +08:00
Xi Lin
811db8bcb9 feat(contracts): request ERC20 through gateway router (#566) 2023-07-07 12:24:44 -07:00
Péter Garamvölgyi
fbd50f3d82 refactor(orm): change chunk_proofs_ready to chunk_proofs_status (#626) 2023-07-07 19:22:24 +02:00
colin
faec817d34 feat(coordinator): upgrade coordinator to rollup v2 (#610)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-07-07 16:23:26 +02:00
ChuhanJin
72ef2cc80e fix(bridge-history-api): fix insert string slice and db type (#614)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2023-07-06 10:48:36 +08:00
Ahmed Castro
8f0690be41 refactor: turn L1ERC721Gateway and L1ERC1155Gateway internal functions virtual (#552)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Xi Lin <zimpha@gmail.com>
2023-07-05 09:48:36 +02:00
432 changed files with 29887 additions and 14736 deletions

View File

@@ -81,7 +81,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc

View File

@@ -25,20 +25,20 @@ defaults:
     working-directory: 'bridge-history-api'
 jobs:
-  # check:
-  #   if: github.event.pull_request.draft == false
-  #   runs-on: ubuntu-latest
-  #   steps:
-  #     - name: Install Go
-  #       uses: actions/setup-go@v2
-  #       with:
-  #         go-version: 1.19.x
-  #     - name: Checkout code
-  #       uses: actions/checkout@v2
-  #     - name: Lint
-  #       run: |
-  #         rm -rf $HOME/.cache/golangci-lint
-  #         make lint
+  check:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.19.x
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Lint
+        run: |
+          rm -rf $HOME/.cache/golangci-lint
+          make lint
   test:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
@@ -51,7 +51,6 @@ jobs:
         uses: actions/checkout@v2
       - name: Test
         run: |
-          go get ./...
           make test
       - name: Upload coverage reports to Codecov
         uses: codecov/codecov-action@v3

View File

@@ -77,7 +77,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc

View File

@@ -93,7 +93,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc

View File

@@ -70,7 +70,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc

View File

@@ -22,7 +22,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc

View File

@@ -1,4 +1,4 @@
-name: Roller
+name: Prover
 on:
   push:
@@ -8,8 +8,8 @@ on:
       - develop
       - alpha
     paths:
-      - 'roller/**'
-      - '.github/workflows/roller.yml'
+      - 'prover/**'
+      - '.github/workflows/prover.yml'
   pull_request:
     types:
       - opened
@@ -17,18 +17,37 @@ on:
       - synchronize
       - ready_for_review
     paths:
-      - 'roller/**'
-      - '.github/workflows/roller.yml'
+      - 'prover/**'
+      - '.github/workflows/prover.yml'
 defaults:
   run:
-    working-directory: 'roller'
+    working-directory: 'prover'
 jobs:
   test:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
     steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.19.x
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Test
+        run: |
+          go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          flags: prover
+  compile:
+    if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
+    runs-on: ubuntu-latest
+    steps:
       - uses: actions-rs/toolchain@v1
         with:
           toolchain: nightly-2022-12-10
@@ -46,14 +65,7 @@ jobs:
           workspaces: "common/libzkp/impl -> target"
       - name: Test
         run: |
-          make roller
-          go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
-      - name: Upload coverage reports to Codecov
-        uses: codecov/codecov-action@v3
-        env:
-          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-        with:
-          flags: roller
+          make prover
   check:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
@@ -80,7 +92,7 @@ jobs:
         uses: actions/checkout@v2
       - name: Install goimports
         run: go install golang.org/x/tools/cmd/goimports
-      - run: goimports -local scroll-tech/roller/ -w .
+      - run: goimports -local scroll-tech/prover/ -w .
       - run: go mod tidy
       # If there are any diffs from goimports or go mod tidy, fail.
       - name: Verify no changes from goimports and go mod tidy
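Note: the renamed test job runs go test with -tags="mock_prover". As a hedged illustration of how such a build tag typically gates a mock implementation (the file, package, and function below are hypothetical and not taken from the repo):

//go:build mock_prover

// mock_verifier.go (hypothetical): compiled only when the "mock_prover"
// build tag is passed, e.g. `go test -tags="mock_prover" ./...`, so CI can
// exercise the surrounding logic without a real proving backend.
package verifier

// VerifyProof always succeeds in the mock build.
func VerifyProof(proof []byte) (bool, error) {
	return true, nil
}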

.github/workflows/prover_stats_api.yml (new file, 80 lines)
View File

@@ -0,0 +1,80 @@
name: ProverStatsAPI
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
defaults:
run:
working-directory: 'prover-stats-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover-stats-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover-stats-api/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -10,7 +10,7 @@ lint: ## The code's format and security checks.
 	make -C common lint
 	make -C coordinator lint
 	make -C database lint
-	make -C roller lint
+	make -C prover lint
 	make -C bridge-history-api lint

 update: ## update dependencies
@@ -20,13 +20,13 @@ update: ## update dependencies
 	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
 	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
 	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
-	cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
+	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
 	goimports -local $(PWD)/bridge/ -w .
 	goimports -local $(PWD)/bridge-history-api/ -w .
 	goimports -local $(PWD)/common/ -w .
 	goimports -local $(PWD)/coordinator/ -w .
 	goimports -local $(PWD)/database/ -w .
-	goimports -local $(PWD)/roller/ -w .
+	goimports -local $(PWD)/prover/ -w .

 dev_docker: ## build docker images for development/testing usages
 	docker build -t scroll_l1geth ./common/docker/l1geth/

View File

@@ -39,6 +39,12 @@ var (
 	L2WithdrawERC721Sig common.Hash
 	L2WithdrawERC1155Sig common.Hash
+
+	// batch nft sigs
+	L1BatchDepositERC721Sig common.Hash
+	L1BatchDepositERC1155Sig common.Hash
+	L2BatchWithdrawERC721Sig common.Hash
+	L2BatchWithdrawERC1155Sig common.Hash

 	// scroll mono repo
 	// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
@@ -116,6 +122,12 @@ func init() {
 	L2ERC1155GatewayABI, _ = L2ERC1155GatewayMetaData.GetAbi()
 	L2WithdrawERC1155Sig = L2ERC1155GatewayABI.Events["WithdrawERC1155"].ID
+
+	// batch nft events
+	L1BatchDepositERC721Sig = L1ERC721GatewayABI.Events["BatchDepositERC721"].ID
+	L1BatchDepositERC1155Sig = L1ERC1155GatewayABI.Events["BatchDepositERC1155"].ID
+	L2BatchWithdrawERC721Sig = L2ERC721GatewayABI.Events["BatchWithdrawERC721"].ID
+	L2BatchWithdrawERC1155Sig = L2ERC1155GatewayABI.Events["BatchWithdrawERC1155"].ID

 	// scroll monorepo
 	ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
 	ScrollChainV2ABI, _ = ScrollChainV2MetaData.GetAbi()
@@ -277,6 +289,23 @@ type ERC1155MessageEvent struct {
 	Amount *big.Int
 }
+
+type BatchERC721MessageEvent struct {
+	L1Token common.Address
+	L2Token common.Address
+	From common.Address
+	To common.Address
+	TokenIDs []*big.Int
+}
+
+type BatchERC1155MessageEvent struct {
+	L1Token common.Address
+	L2Token common.Address
+	From common.Address
+	To common.Address
+	TokenIDs []*big.Int
+	TokenAmounts []*big.Int
+}

 // scroll monorepo
 // L1SentMessageEvent represents a SentMessage event raised by the L1ScrollMessenger contract.

View File

@@ -3,92 +3,70 @@ package app
 import (
 	"fmt"
 	"os"
+	"os/signal"

 	"github.com/ethereum/go-ethereum/log"
-	"github.com/iris-contrib/middleware/cors"
-	"github.com/kataras/iris/v12"
-	"github.com/kataras/iris/v12/mvc"
+	"github.com/gin-gonic/gin"
 	"github.com/urfave/cli/v2"

 	"bridge-history-api/config"
-	"bridge-history-api/controller"
-	"bridge-history-api/db"
-	"bridge-history-api/service"
-	cutils "bridge-history-api/utils"
+	"bridge-history-api/internal/controller"
+	"bridge-history-api/internal/route"
+	"bridge-history-api/utils"
 )

 var (
 	app *cli.App
 )

-var database db.OrmFactory
-
-func pong(ctx iris.Context) {
-	ctx.WriteString("pong")
-}
-
-func setupQueryByAddressHandler(backend_app *mvc.Application) {
-	// Register Dependencies.
-	backend_app.Register(
-		database,
-		service.NewHistoryService,
-	)
-	// Register Controllers.
-	backend_app.Handle(new(controller.QueryAddressController))
-}
-
-func setupQueryByHashHandler(backend_app *mvc.Application) {
-	backend_app.Register(
-		database,
-		service.NewHistoryService,
-	)
-	backend_app.Handle(new(controller.QueryHashController))
-}
-
 func init() {
 	app = cli.NewApp()
 	app.Action = action
 	app.Name = "Scroll Bridge History Web Service"
 	app.Usage = "The Scroll Bridge History Web Service"
-	app.Flags = append(app.Flags, cutils.CommonFlags...)
+	app.Flags = append(app.Flags, utils.CommonFlags...)
 	app.Commands = []*cli.Command{}
 	app.Before = func(ctx *cli.Context) error {
-		return cutils.LogSetup(ctx)
+		return utils.LogSetup(ctx)
 	}
 }

 func action(ctx *cli.Context) error {
-	corsOptions := cors.New(cors.Options{
-		AllowedOrigins: []string{"*"},
-		AllowedMethods: []string{"GET", "POST", "PUT", "DELETE"},
-		AllowCredentials: true,
-	})
 	// Load config file.
-	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
+	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
 	cfg, err := config.NewConfig(cfgFile)
 	if err != nil {
 		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
 	}
-	database, err = db.NewOrmFactory(cfg)
+	db, err := utils.InitDB(cfg.DB)
 	if err != nil {
-		log.Crit("can not connect to database", "err", err)
+		log.Crit("failed to init db", "err", err)
 	}
-	defer database.Close()
-	bridgeApp := iris.New()
-	bridgeApp.UseRouter(corsOptions)
-	bridgeApp.Get("/ping", pong).Describe("healthcheck")
-
-	mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
-	mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler)
-
-	// TODO: make debug mode configurable
-	err = bridgeApp.Listen(cfg.Server.HostPort, iris.WithLogLevel("debug"))
-	if err != nil {
-		log.Crit("can not start server", "err", err)
-	}
+	defer func() {
+		if deferErr := utils.CloseDB(db); deferErr != nil {
+			log.Error("failed to close db", "err", err)
+		}
+	}()
+	// init Prover Stats API
+	port := cfg.Server.HostPort
+	router := gin.Default()
+	controller.InitController(db)
+	route.Route(router, cfg)
+	go func() {
+		if runServerErr := router.Run(fmt.Sprintf(":%s", port)); runServerErr != nil {
+			log.Crit("run http server failure", "error", runServerErr)
+		}
+	}()
+	// Catch CTRL-C to ensure a graceful shutdown.
+	interrupt := make(chan os.Signal, 1)
+	signal.Notify(interrupt, os.Interrupt)
+	// Wait until the interrupt signal is received from an OS signal.
+	<-interrupt
 	return nil
 }
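Note: for readers unfamiliar with the pattern the rewritten action() follows above (gin router started in a goroutine, then the process blocks on an OS interrupt), a minimal standalone sketch is below; the /ping route and the hard-coded port are illustrative only, not the repo's code.

package main // minimal illustration of the gin + signal-wait pattern

import (
	"fmt"
	"os"
	"os/signal"

	"github.com/gin-gonic/gin"
)

func main() {
	router := gin.Default()
	// a trivial healthcheck route, analogous to the old iris "/ping" handler
	router.GET("/ping", func(c *gin.Context) {
		c.String(200, "pong")
	})

	port := "20006" // the config now stores only the port; ":" is prepended here
	go func() {
		if err := router.Run(fmt.Sprintf(":%s", port)); err != nil {
			panic(err)
		}
	}()

	// Block until CTRL-C so the goroutine-backed server keeps serving.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	<-interrupt
}

Running router.Run in a goroutine keeps the main goroutine free to own shutdown, which is why the old blocking iris Listen call disappears from the diff.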

View File

@@ -12,10 +12,10 @@ import (
 	"github.com/urfave/cli/v2"

 	"bridge-history-api/config"
-	"bridge-history-api/cross_msg"
-	"bridge-history-api/cross_msg/message_proof"
-	"bridge-history-api/db"
-	cutils "bridge-history-api/utils"
+	"bridge-history-api/crossmsg"
+	"bridge-history-api/crossmsg/messageproof"
+	"bridge-history-api/orm"
+	"bridge-history-api/utils"
 )

 var (
@@ -28,17 +28,17 @@ func init() {
 	app.Action = action
 	app.Name = "Scroll Bridge History API"
 	app.Usage = "The Scroll Bridge Web Backend"
-	app.Flags = append(app.Flags, cutils.CommonFlags...)
+	app.Flags = append(app.Flags, utils.CommonFlags...)
 	app.Commands = []*cli.Command{}
 	app.Before = func(ctx *cli.Context) error {
-		return cutils.LogSetup(ctx)
+		return utils.LogSetup(ctx)
 	}
 }

 func action(ctx *cli.Context) error {
 	// Load config file.
-	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
+	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
 	cfg, err := config.NewConfig(cfgFile)
 	if err != nil {
 		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -54,15 +54,23 @@ func action(ctx *cli.Context) error {
 	if err != nil {
 		log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
 	}
-	db, err := db.NewOrmFactory(cfg)
-	defer db.Close()
+	db, err := utils.InitDB(cfg.DB)
+	if err != nil {
+		log.Crit("failed to init db", "err", err)
+	}
+	defer func() {
+		if deferErr := utils.CloseDB(db); deferErr != nil {
+			log.Error("failed to close db", "err", err)
+		}
+	}()
 	if err != nil {
 		log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
 	}
-	l1worker := &cross_msg.FetchEventWorker{F: cross_msg.L1FetchAndSaveEvents, G: cross_msg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
-	l2worker := &cross_msg.FetchEventWorker{F: cross_msg.L2FetchAndSaveEvents, G: cross_msg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}
+	l1worker := &crossmsg.FetchEventWorker{F: crossmsg.L1FetchAndSaveEvents, G: crossmsg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
+	l2worker := &crossmsg.FetchEventWorker{F: crossmsg.L2FetchAndSaveEvents, G: crossmsg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}

 	l1AddressList := []common.Address{
 		common.HexToAddress(cfg.L1.CustomERC20GatewayAddr),
@@ -84,7 +92,7 @@ func action(ctx *cli.Context) error {
 		common.HexToAddress(cfg.L2.WETHGatewayAddr),
 	}

-	l1crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, cross_msg.L1ReorgHandling)
+	l1crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, crossmsg.L1ReorgHandling)
 	if err != nil {
 		log.Crit("failed to create l1 cross message fetcher", "error", err)
 	}
@@ -92,7 +100,7 @@ func action(ctx *cli.Context) error {
 	go l1crossMsgFetcher.Start()
 	defer l1crossMsgFetcher.Stop()

-	l2crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, cross_msg.L2ReorgHandling)
+	l2crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, crossmsg.L2ReorgHandling)
 	if err != nil {
 		log.Crit("failed to create l2 cross message fetcher", "error", err)
 	}
@@ -100,18 +108,20 @@ func action(ctx *cli.Context) error {
 	go l2crossMsgFetcher.Start()
 	defer l2crossMsgFetcher.Stop()

+	CrossMsgOrm := orm.NewCrossMsg(db)
 	// BlockTimestamp fetcher for l1 and l2
-	l1BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
+	l1BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, CrossMsgOrm.UpdateL1BlockTimestamp, CrossMsgOrm.GetL1EarliestNoBlockTimestampHeight)
 	go l1BlockTimeFetcher.Start()
 	defer l1BlockTimeFetcher.Stop()

-	l2BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
+	l2BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, CrossMsgOrm.UpdateL2BlockTimestamp, CrossMsgOrm.GetL2EarliestNoBlockTimestampHeight)
 	go l2BlockTimeFetcher.Start()
 	defer l2BlockTimeFetcher.Stop()

 	// Proof updater and batch fetcher
-	l2msgProofUpdater := message_proof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
-	batchFetcher := cross_msg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
+	l2msgProofUpdater := messageproof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
+	batchFetcher := crossmsg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
 	go batchFetcher.Start()
 	defer batchFetcher.Stop()

View File

@@ -2,12 +2,11 @@ package app
 import (
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/jmoiron/sqlx"
 	"github.com/urfave/cli/v2"
+	"gorm.io/gorm"

 	"bridge-history-api/config"
-	"bridge-history-api/db"
-	"bridge-history-api/db/migrate"
+	"bridge-history-api/orm/migrate"
 	"bridge-history-api/utils"
 )
@@ -20,14 +19,8 @@ func getConfig(ctx *cli.Context) (*config.Config, error) {
 	return dbCfg, nil
 }

-func initDB(dbCfg *config.Config) (*sqlx.DB, error) {
-	factory, err := db.NewOrmFactory(dbCfg)
-	if err != nil {
-		return nil, err
-	}
-	log.Debug("Got db config from env", "driver name", dbCfg.DB.DriverName, "dsn", dbCfg.DB.DSN)
-	return factory.GetDB(), nil
+func initDB(dbCfg *config.DBConfig) (*gorm.DB, error) {
+	return utils.InitDB(dbCfg)
 }

 // resetDB clean or reset database.
@@ -36,11 +29,15 @@ func resetDB(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	db, err := initDB(cfg)
+	gormDB, err := initDB(cfg.DB)
 	if err != nil {
 		return err
 	}
-	err = migrate.ResetDB(db.DB)
+	db, err := gormDB.DB()
+	if err != nil {
+		return err
+	}
+	err = migrate.ResetDB(db)
 	if err != nil {
 		return err
 	}
@@ -54,12 +51,15 @@ func checkDBStatus(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	db, err := initDB(cfg)
+	gormDB, err := initDB(cfg.DB)
 	if err != nil {
 		return err
 	}
-	return migrate.Status(db.DB)
+	db, err := gormDB.DB()
+	if err != nil {
+		return err
+	}
+	return migrate.Status(db)
 }

 // dbVersion return the latest version
@@ -68,12 +68,15 @@ func dbVersion(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	db, err := initDB(cfg)
+	gormDB, err := initDB(cfg.DB)
 	if err != nil {
 		return err
 	}
-	version, err := migrate.Current(db.DB)
+	db, err := gormDB.DB()
+	if err != nil {
+		return err
+	}
+	version, err := migrate.Current(db)
 	log.Info("show database version", "db version", version)

 	return err
@@ -85,12 +88,15 @@ func migrateDB(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	db, err := initDB(cfg)
+	gormDB, err := initDB(cfg.DB)
 	if err != nil {
 		return err
 	}
-	return migrate.Migrate(db.DB)
+	db, err := gormDB.DB()
+	if err != nil {
+		return err
+	}
+	return migrate.Migrate(db)
 }

 // rollbackDB rollback db by version
@@ -99,10 +105,14 @@ func rollbackDB(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	db, err := initDB(cfg)
+	gormDB, err := initDB(cfg.DB)
+	if err != nil {
+		return err
+	}
+	db, err := gormDB.DB()
 	if err != nil {
 		return err
 	}
 	version := ctx.Int64("version")
-	return migrate.Rollback(db.DB, &version)
+	return migrate.Rollback(db, &version)
 }
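Note: each command above now opens a *gorm.DB and then calls gormDB.DB() to hand the underlying *sql.DB to the migrate helpers. A compact sketch of that conversion, assuming a Postgres DSN (the helper name below is illustrative):

package example // illustration of the gorm.DB -> *sql.DB conversion used above

import (
	"database/sql"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

// openSQLDB opens a GORM connection and returns the underlying *sql.DB,
// which is what migration packages typically expect.
func openSQLDB(dsn string) (*sql.DB, error) {
	gormDB, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		return nil, err
	}
	return gormDB.DB()
}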

View File

@@ -36,6 +36,6 @@
     "maxIdleNum": 20
   },
   "server": {
-    "hostPort": "0.0.0.0:20006"
+    "hostPort": "20006"
   }
 }

View File

@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 )

+// BatchInfoFetcherConfig is the configuration of BatchInfoFetcher
 type BatchInfoFetcherConfig struct {
 	BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"`
 	ScrollChainAddr string `json:"ScrollChainAddr"`
@@ -21,6 +22,7 @@ type DBConfig struct {
 	MaxIdleNum int `json:"maxIdleNum"`
 }

+// LayerConfig is the configuration of Layer1/Layer2
 type LayerConfig struct {
 	Confirmation uint64 `json:"confirmation"`
 	Endpoint string `json:"endpoint"`
@@ -35,6 +37,7 @@ type LayerConfig struct {
 	CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
 }

+// ServerConfig is the configuration of the bridge history backend server port
 type ServerConfig struct {
 	HostPort string `json:"hostPort"`
 }
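Note: structs like ServerConfig map onto the JSON shown two files up via their json tags. A minimal, hypothetical loader sketch (the repo's actual config.NewConfig may differ):

package example // hypothetical sketch of loading the JSON config above

import (
	"encoding/json"
	"os"
)

type ServerConfig struct {
	HostPort string `json:"hostPort"`
}

type Config struct {
	Server ServerConfig `json:"server"`
}

// loadConfig reads the JSON file and unmarshals it into Config,
// so "server.hostPort" in config.json populates Config.Server.HostPort.
func loadConfig(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cfg := &Config{}
	if err := json.Unmarshal(data, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}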

View File

@@ -1,37 +0,0 @@
package controller
import (
"bridge-history-api/model"
"bridge-history-api/service"
"github.com/ethereum/go-ethereum/common"
)
type QueryAddressController struct {
Service service.HistoryService
}
type QueryHashController struct {
Service service.HistoryService
}
func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil {
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByAddressResponse{Message: "ok",
Data: &model.Data{
Result: message,
Total: total,
}}, nil
}
func (c *QueryHashController) Post(req model.QueryByHashRequest) (*model.QueryByHashResponse, error) {
result, err := c.Service.GetTxsByHashes(req.Txs)
if err != nil {
return &model.QueryByHashResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByHashResponse{Message: "ok", Data: &model.Data{Result: result}}, nil
}

View File

@@ -1,275 +0,0 @@
package cross_msg
import (
"context"
"math/big"
geth "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
backendabi "bridge-history-api/abi"
"bridge-history-api/db"
"bridge-history-api/db/orm"
"bridge-history-api/utils"
)
// Todo : read from config
var (
// the number of blocks fetch per round
fetchLimit = int64(3000)
)
// FetchAndSave is a function type that fetches events from blockchain and saves them to database
type FetchAndSave func(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addressList []common.Address) error
// GetLatestProcessed is a function type that gets the latest processed block height from database
type GetLatestProcessed func(db db.OrmFactory) (int64, error)
type UpdateXHash func(ctx context.Context)
type FetchEventWorker struct {
F FetchAndSave
G GetLatestProcessed
Name string
}
func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL1ProcessedHeight()
if err != nil {
log.Error("failed to get L1 cross message processed height: ", "err", err)
return 0, err
}
relayedHeight, err := db.GetLatestRelayedHeightOnL1()
if err != nil {
log.Error("failed to get L1 relayed message processed height: ", "err", err)
return 0, err
}
if crossHeight > relayedHeight {
return crossHeight, nil
} else {
return relayedHeight, nil
}
}
func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL2ProcessedHeight()
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, err
}
relayedHeight, err := db.GetLatestRelayedHeightOnL2()
if err != nil {
log.Error("failed to get L2 relayed message processed height", "err", err)
return 0, err
}
l2SentHeight, err := db.GetLatestSentMsgHeightOnL2()
if err != nil {
log.Error("failed to get L2 sent message processed height", "err", err)
return 0, err
}
maxHeight := crossHeight
if maxHeight < relayedHeight {
maxHeight = relayedHeight
}
if maxHeight < l2SentHeight {
maxHeight = l2SentHeight
}
return maxHeight, nil
}
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: addrList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1RelayedMessageEventSignature
query.Topics[0][3] = backendabi.L1SentMessageEventSignature
query.Topics[0][4] = backendabi.L1DepositERC721Sig
query.Topics[0][5] = backendabi.L1DepositERC1155Sig
query.Topics[0][6] = backendabi.L1DepositWETHSig
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get l1 event logs", "err", err)
return err
}
depositL1CrossMsgs, msgHashes, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
if err != nil {
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
for i := range depositL1CrossMsgs {
for _, msgHash := range msgHashes {
if depositL1CrossMsgs[i].Layer1Hash == msgHash.TxHash.Hex() {
depositL1CrossMsgs[i].MsgHash = msgHash.MsgHash.Hex()
break
}
}
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
return err
}
err = database.BatchInsertL1CrossMsgDBTx(dbTx, depositL1CrossMsgs)
if err != nil {
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
}
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
if err != nil {
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
dbTx.Rollback()
log.Error("l1FetchAndSaveEvents: Failed to commit db transaction", "err", err)
return err
}
return nil
}
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: addrList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2RelayedMessageEventSignature
query.Topics[0][3] = backendabi.L2SentMessageEventSignature
query.Topics[0][4] = backendabi.L2WithdrawERC721Sig
query.Topics[0][5] = backendabi.L2WithdrawERC1155Sig
query.Topics[0][6] = backendabi.L2WithdrawWETHSig
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get l2 event logs", "err", err)
return err
}
depositL2CrossMsgs, relayedMsg, L2SentMsgWrappers, err := utils.ParseBackendL2EventLogs(logs)
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
var l2SentMsgs []*orm.L2SentMsg
for i := range depositL2CrossMsgs {
for _, l2SentMsgWrapper := range L2SentMsgWrappers {
if depositL2CrossMsgs[i].Layer2Hash == l2SentMsgWrapper.TxHash.Hex() {
depositL2CrossMsgs[i].MsgHash = l2SentMsgWrapper.L2SentMsg.MsgHash
l2SentMsgWrapper.L2SentMsg.TxSender = depositL2CrossMsgs[i].Sender
l2SentMsgs = append(l2SentMsgs, l2SentMsgWrapper.L2SentMsg)
break
}
}
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
return err
}
err = database.BatchInsertL2CrossMsgDBTx(dbTx, depositL2CrossMsgs)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
}
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
if len(l2SentMsgs) > 0 {
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
}
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
dbTx.Rollback()
log.Error("l2FetchAndSaveEvents: Failed to commit db transaction", "err", err)
return err
}
return nil
}
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, scrollChainAddr common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{scrollChainAddr},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = backendabi.L1CommitBatchEventSignature
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get batch commit event logs", "err", err)
return err
}
rollupBatches, err := utils.ParseBatchInfoFromScrollChain(ctx, client, logs)
if err != nil {
log.Error("FetchAndSaveBatchIndex: Failed to parse batch commit msg event logs", "err", err)
return err
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("FetchAndSaveBatchIndex: Failed to begin db transaction", "err", err)
return err
}
err = database.BatchInsertRollupBatchDBTx(dbTx, rollupBatches)
if err != nil {
dbTx.Rollback()
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
dbTx.Rollback()
log.Error("FetchAndSaveBatchIndex: Failed to commit db transaction", "err", err)
return err
}
return nil
}
func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}
func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}

View File

@@ -1,109 +0,0 @@
package cross_msg
import (
"context"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"bridge-history-api/db"
)
type ReorgHandling func(ctx context.Context, reorgHeight int64, db db.OrmFactory) error
func reverseArray(arr []*types.Header) []*types.Header {
for i := 0; i < len(arr)/2; i++ {
j := len(arr) - i - 1
arr[i], arr[j] = arr[j], arr[i]
}
return arr
}
func IsParentAndChild(parentHeader *types.Header, header *types.Header) bool {
return header.ParentHash == parentHeader.Hash()
}
func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []*types.Header {
mergedArr := append(baseArr, extraArr...)
if len(mergedArr) <= maxLength {
return mergedArr
}
startIndex := len(mergedArr) - maxLength
return mergedArr[startIndex:]
}
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, header *types.Header) (int, bool, []*types.Header) {
maxStep := len(headers)
backwardHeaderList := []*types.Header{header}
for iterRound := 0; iterRound < maxStep; iterRound++ {
header, err := client.HeaderByHash(ctx, header.ParentHash)
if err != nil {
log.Error("BackwardFindReorgBlock failed", "error", err)
return -1, false, nil
}
backwardHeaderList = append(backwardHeaderList, header)
for j := len(headers) - 1; j >= 0; j-- {
if IsParentAndChild(headers[j], header) {
backwardHeaderList = reverseArray(backwardHeaderList)
return j, true, backwardHeaderList
}
}
}
return -1, false, nil
}
func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
dbTx, err := db.Beginx()
if err != nil {
log.Crit("begin db tx failed", "err", err)
}
err = db.DeleteL1CrossMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l1 cross msg from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL1RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l1 relayed hash from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
dbTx.Rollback()
log.Error("commit tx failed", "err", err)
return err
}
return nil
}
func L2ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
dbTx, err := db.Beginx()
if err != nil {
dbTx.Rollback()
log.Crit("begin db tx failed", "err", err)
}
err = db.DeleteL2CrossMsgFromHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l2 cross msg from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l2 relayed hash from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2SentMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l2 sent msg from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
dbTx.Rollback()
log.Error("commit tx failed", "err", err)
return err
}
return nil
}

View File

@@ -1,4 +1,4 @@
-package cross_msg
+package crossmsg

 import (
 	"context"
@@ -7,12 +7,14 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/log"
+	"gorm.io/gorm"

-	"bridge-history-api/cross_msg/message_proof"
-	"bridge-history-api/db"
+	"bridge-history-api/crossmsg/messageproof"
+	"bridge-history-api/orm"
 	"bridge-history-api/utils"
 )

+// BatchInfoFetcher fetches batch info from l1 chain and update db
 type BatchInfoFetcher struct {
 	ctx context.Context
 	scrollChainAddr common.Address
@@ -20,11 +22,13 @@ type BatchInfoFetcher struct {
 	confirmation uint64
 	blockTimeInSec int
 	client *ethclient.Client
-	db db.OrmFactory
-	msgProofUpdater *message_proof.MsgProofUpdater
+	db *gorm.DB
+	rollupOrm *orm.RollupBatch
+	msgProofUpdater *messageproof.MsgProofUpdater
 }

-func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *message_proof.MsgProofUpdater) *BatchInfoFetcher {
+// NewBatchInfoFetcher creates a new BatchInfoFetcher instance
+func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db *gorm.DB, msgProofUpdater *messageproof.MsgProofUpdater) *BatchInfoFetcher {
 	return &BatchInfoFetcher{
 		ctx: ctx,
 		scrollChainAddr: scrollChainAddr,
@@ -33,17 +37,19 @@ func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, ba
 		blockTimeInSec: blockTimeInSec,
 		client: client,
 		db: db,
+		rollupOrm: orm.NewRollupBatch(db),
 		msgProofUpdater: msgProofUpdater,
 	}
 }

+// Start the BatchInfoFetcher
 func (b *BatchInfoFetcher) Start() {
 	log.Info("BatchInfoFetcher Start")
 	// Fetch batch info at beginning
 	// Then start msg proof updater after db have some bridge batch
 	err := b.fetchBatchInfo()
 	if err != nil {
-		log.Error("fetch batch info at begining failed: ", "err", err)
+		log.Error("fetch batch info at beginning failed: ", "err", err)
 	}

 	go b.msgProofUpdater.Start()
@@ -65,6 +71,7 @@ func (b *BatchInfoFetcher) Start() {
 	}()
 }

+// Stop the BatchInfoFetcher and call msg proof updater to stop
 func (b *BatchInfoFetcher) Stop() {
 	log.Info("BatchInfoFetcher Stop")
 	b.msgProofUpdater.Stop()
@@ -76,19 +83,20 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
 		log.Error("Can not get latest block number: ", "err", err)
 		return err
 	}
-	latestBatch, err := b.db.GetLatestRollupBatch()
+	latestBatchHeight, err := b.rollupOrm.GetLatestRollupBatchProcessedHeight(b.ctx)
 	if err != nil {
 		log.Error("Can not get latest BatchInfo: ", "err", err)
 		return err
 	}
 	var startHeight uint64
-	if latestBatch == nil {
+	if latestBatchHeight == 0 {
+		log.Info("no batch record in database, start from batchInfoStartNumber", "batchInfoStartNumber", b.batchInfoStartNumber)
 		startHeight = b.batchInfoStartNumber
 	} else {
-		startHeight = latestBatch.CommitHeight + 1
+		startHeight = latestBatchHeight + 1
 	}
-	for from := startHeight; number >= from; from += uint64(fetchLimit) {
-		to := from + uint64(fetchLimit) - 1
+	for from := startHeight; number >= from; from += fetchLimit {
+		to := from + fetchLimit - 1
 		// number - confirmation can never less than 0 since the for loop condition
 		// but watch out the overflow
 		if to > number {
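Note: the fetch loop above walks the chain in fetchLimit-sized inclusive ranges, capped by the confirmed head. A simplified, self-contained sketch of that slicing (the names and the confirmation handling are illustrative, not the repo's exact logic):

package example // illustrative range slicing, similar in spirit to fetchBatchInfo

// chunkRanges splits [start, latest-confirmations] into limit-sized
// inclusive [from, to] ranges.
func chunkRanges(start, latest, confirmations, limit uint64) [][2]uint64 {
	var out [][2]uint64
	if limit == 0 || latest < confirmations {
		return out
	}
	safe := latest - confirmations // highest block considered final
	for from := start; from <= safe; from += limit {
		to := from + limit - 1
		if to > safe {
			to = safe
		}
		out = append(out, [2]uint64{from, to})
	}
	return out
}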

View File

@@ -1,4 +1,4 @@
package cross_msg package crossmsg
import ( import (
"context" "context"
@@ -9,9 +9,13 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
type GetEarliestNoBlockTimestampHeightFunc func() (uint64, error) // GetEarliestNoBlockTimestampHeightFunc is a function type that gets the earliest record without block timestamp from database
type UpdateBlockTimestampFunc func(height uint64, timestamp time.Time) error type GetEarliestNoBlockTimestampHeightFunc func(ctx context.Context) (uint64, error)
// UpdateBlockTimestampFunc is a function type that updates block timestamp into database
type UpdateBlockTimestampFunc func(ctx context.Context, height uint64, timestamp time.Time) error
// BlockTimestampFetcher fetches block timestamp from blockchain and saves them to database
type BlockTimestampFetcher struct { type BlockTimestampFetcher struct {
ctx context.Context ctx context.Context
confirmation uint64 confirmation uint64
@@ -21,6 +25,7 @@ type BlockTimestampFetcher struct {
getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc
} }
// NewBlockTimestampFetcher creates a new BlockTimestampFetcher instance
func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher { func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher {
return &BlockTimestampFetcher{ return &BlockTimestampFetcher{
ctx: ctx, ctx: ctx,
@@ -32,6 +37,7 @@ func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTim
} }
} }
// Start the BlockTimestampFetcher
func (b *BlockTimestampFetcher) Start() { func (b *BlockTimestampFetcher) Start() {
go func() { go func() {
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second) tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
@@ -46,7 +52,7 @@ func (b *BlockTimestampFetcher) Start() {
log.Error("Can not get latest block number", "err", err) log.Error("Can not get latest block number", "err", err)
continue continue
} }
startHeight, err := b.getEarliestNoBlockTimestampHeightFunc() startHeight, err := b.getEarliestNoBlockTimestampHeightFunc(b.ctx)
if err != nil { if err != nil {
log.Error("Can not get latest record without block timestamp", "err", err) log.Error("Can not get latest record without block timestamp", "err", err)
continue continue
@@ -57,12 +63,12 @@ func (b *BlockTimestampFetcher) Start() {
log.Error("Can not get block by number", "err", err) log.Error("Can not get block by number", "err", err)
break break
} }
err = b.updateBlockTimestampFunc(height, time.Unix(int64(block.Time), 0)) err = b.updateBlockTimestampFunc(b.ctx, height, time.Unix(int64(block.Time), 0))
if err != nil { if err != nil {
log.Error("Can not update blockTimestamp into DB ", "err", err) log.Error("Can not update blockTimestamp into DB ", "err", err)
break break
} }
height, err = b.getEarliestNoBlockTimestampHeightFunc() height, err = b.getEarliestNoBlockTimestampHeightFunc(b.ctx)
if err != nil { if err != nil {
log.Error("Can not get latest record without block timestamp", "err", err) log.Error("Can not get latest record without block timestamp", "err", err)
break break
@@ -73,6 +79,7 @@ func (b *BlockTimestampFetcher) Start() {
}() }()
} }
// Stop the BlockTimestampFetcher and log the info
func (b *BlockTimestampFetcher) Stop() { func (b *BlockTimestampFetcher) Stop() {
log.Info("BlockTimestampFetcher Stop") log.Info("BlockTimestampFetcher Stop")
} }

View File

@@ -1,4 +1,4 @@
package cross_msg package crossmsg
import ( import (
"context" "context"
@@ -12,16 +12,17 @@ import (
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/modern-go/reflect2" "github.com/modern-go/reflect2"
"gorm.io/gorm"
"bridge-history-api/config" "bridge-history-api/config"
"bridge-history-api/db"
"bridge-history-api/utils" "bridge-history-api/utils"
) )
type CrossMsgFetcher struct { // MsgFetcher fetches cross message events from blockchain and saves them to database
type MsgFetcher struct {
ctx context.Context ctx context.Context
config *config.LayerConfig config *config.LayerConfig
db db.OrmFactory db *gorm.DB
client *ethclient.Client client *ethclient.Client
worker *FetchEventWorker worker *FetchEventWorker
reorgHandling ReorgHandling reorgHandling ReorgHandling
@@ -32,8 +33,9 @@ type CrossMsgFetcher struct {
reorgEndCh chan struct{} reorgEndCh chan struct{}
} }
func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*CrossMsgFetcher, error) { // NewMsgFetcher creates a new MsgFetcher instance
crossMsgFetcher := &CrossMsgFetcher{ func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db *gorm.DB, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*MsgFetcher, error) {
msgFetcher := &MsgFetcher{
ctx: ctx, ctx: ctx,
config: config, config: config,
db: db, db: db,
@@ -45,11 +47,12 @@ func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.O
reorgStartCh: make(chan struct{}), reorgStartCh: make(chan struct{}),
reorgEndCh: make(chan struct{}), reorgEndCh: make(chan struct{}),
} }
return crossMsgFetcher, nil return msgFetcher, nil
} }
func (c *CrossMsgFetcher) Start() { // Start the MsgFetcher
log.Info("CrossMsgFetcher Start") func (c *MsgFetcher) Start() {
log.Info("MsgFetcher Start")
// fetch missing events from finalized blocks, we don't handle reorgs here // fetch missing events from finalized blocks, we don't handle reorgs here
c.forwardFetchAndSaveMissingEvents(c.config.Confirmation) c.forwardFetchAndSaveMissingEvents(c.config.Confirmation)
@@ -94,12 +97,13 @@ func (c *CrossMsgFetcher) Start() {
}() }()
} }
func (c *CrossMsgFetcher) Stop() { // Stop the MsgFetcher and log the info
log.Info("CrossMsgFetcher Stop") func (c *MsgFetcher) Stop() {
log.Info("MsgFetcher Stop")
} }
// forwardFetchAndSaveMissingEvents will fetch all events from the latest processed height to the latest block number. // forwardFetchAndSaveMissingEvents will fetch all events from the latest processed height to the latest block number.
func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) { func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
// if we fetch to the latest block, shall not exceed cachedHeaders // if we fetch to the latest block, shall not exceed cachedHeaders
var number uint64 var number uint64
var err error var err error
@@ -116,22 +120,23 @@ func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64)
log.Error(fmt.Sprintf("%s: invalid get/fetch function", c.worker.Name)) log.Error(fmt.Sprintf("%s: invalid get/fetch function", c.worker.Name))
return return
} }
processedHeight, err := c.worker.G(c.db) processedHeight, err := c.worker.G(c.ctx, c.db)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("%s: can not get latest processed block height", c.worker.Name)) log.Error(fmt.Sprintf("%s: can not get latest processed block height", c.worker.Name))
} }
log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processedHeight) log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processedHeight)
if processedHeight <= 0 || processedHeight < int64(c.config.StartHeight) { if processedHeight <= 0 || processedHeight < c.config.StartHeight {
processedHeight = int64(c.config.StartHeight) processedHeight = c.config.StartHeight
} else { } else {
processedHeight += 1 processedHeight++
} }
for from := processedHeight; from <= int64(number); from += fetchLimit { for from := processedHeight; from <= number; from += fetchLimit {
to := from + fetchLimit - 1 to := from + fetchLimit - 1
if to > int64(number) { if to > number {
to = int64(number) to = number
} }
err := c.worker.F(c.ctx, c.client, c.db, from, to, c.addressList) // watch out for overflow here, though it's unlikely to happen
err := c.worker.F(c.ctx, c.client, c.db, int64(from), int64(to), c.addressList)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("%s: failed!", c.worker.Name), "err", err) log.Error(fmt.Sprintf("%s: failed!", c.worker.Name), "err", err)
break break
@@ -139,7 +144,7 @@ func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64)
} }
} }
func (c *CrossMsgFetcher) fetchMissingLatestHeaders() { func (c *MsgFetcher) fetchMissingLatestHeaders() {
var start int64 var start int64
number, err := c.client.BlockNumber(c.ctx) number, err := c.client.BlockNumber(c.ctx)
if err != nil { if err != nil {
@@ -159,7 +164,7 @@ func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
close(c.reorgEndCh) close(c.reorgEndCh)
return return
default: default:
header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(i))) header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(i))
if err != nil { if err != nil {
log.Error("failed to get latest header", "err", err) log.Error("failed to get latest header", "err", err)
return return
@@ -181,13 +186,13 @@ func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
c.mu.Lock() c.mu.Lock()
index, ok, validHeaders := BackwardFindReorgBlock(c.ctx, c.cachedHeaders, c.client, header) index, ok, validHeaders := BackwardFindReorgBlock(c.ctx, c.cachedHeaders, c.client, header)
if !ok { if !ok {
log.Error("Reorg happended too earlier than cached headers", "reorg height", header.Number) log.Error("Reorg happened too earlier than cached headers", "reorg height", header.Number)
num, err := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation) num, getSafeErr := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
if err != nil { if getSafeErr != nil {
log.Crit("Can not get safe number during reorg, quit the process", "err", err) log.Crit("Can not get safe number during reorg, quit the process", "err", err)
} }
// clear all our saved data, because no data is safe now // clear all our saved data, because no data is safe now
err = c.reorgHandling(c.ctx, int64(num), c.db) err = c.reorgHandling(c.ctx, num, c.db)
// if handling success then we can update the cachedHeaders // if handling success then we can update the cachedHeaders
if err == nil { if err == nil {
c.cachedHeaders = c.cachedHeaders[:0] c.cachedHeaders = c.cachedHeaders[:0]
@@ -196,7 +201,7 @@ func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
c.reorgEndCh <- struct{}{} c.reorgEndCh <- struct{}{}
return return
} }
err = c.reorgHandling(c.ctx, c.cachedHeaders[index].Number.Int64(), c.db) err = c.reorgHandling(c.ctx, c.cachedHeaders[index].Number.Uint64(), c.db)
// if handling success then we can update the cachedHeaders // if handling success then we can update the cachedHeaders
if err == nil { if err == nil {
c.cachedHeaders = c.cachedHeaders[:index+1] c.cachedHeaders = c.cachedHeaders[:index+1]

View File

@@ -0,0 +1,213 @@
package crossmsg
import (
"context"
"math/big"
geth "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
backendabi "bridge-history-api/abi"
"bridge-history-api/orm"
"bridge-history-api/utils"
)
// TODO: read from config
var (
// the number of blocks fetched per round
fetchLimit = uint64(3000)
)
// FetchAndSave is a function type that fetches events from blockchain and saves them to database
type FetchAndSave func(ctx context.Context, client *ethclient.Client, database *gorm.DB, from int64, to int64, addressList []common.Address) error
// GetLatestProcessed is a function type that gets the latest processed block height from database
type GetLatestProcessed func(ctx context.Context, db *gorm.DB) (uint64, error)
// FetchEventWorker defines worker with fetch and save function, processed number getter, and name
type FetchEventWorker struct {
F FetchAndSave
G GetLatestProcessed
Name string
}
// GetLatestL1ProcessedHeight gets the latest processed L1 height
func GetLatestL1ProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
l1CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
crossHeight, err := l1CrossMsgOrm.GetLatestL1ProcessedHeight(ctx)
if err != nil {
log.Error("failed to get L1 cross message processed height: ", "err", err)
return 0, err
}
relayedHeight, err := relayedOrm.GetLatestRelayedHeightOnL1(ctx)
if err != nil {
log.Error("failed to get L1 relayed message processed height: ", "err", err)
return 0, err
}
if crossHeight > relayedHeight {
return crossHeight, nil
}
return relayedHeight, nil
}
// GetLatestL2ProcessedHeight gets the latest processed L2 height
func GetLatestL2ProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
l2CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
l2SentMsgOrm := orm.NewL2SentMsg(db)
crossHeight, err := l2CrossMsgOrm.GetLatestL2ProcessedHeight(ctx)
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, err
}
relayedHeight, err := relayedOrm.GetLatestRelayedHeightOnL2(ctx)
if err != nil {
log.Error("failed to get L2 relayed message processed height", "err", err)
return 0, err
}
l2SentHeight, err := l2SentMsgOrm.GetLatestSentMsgHeightOnL2(ctx)
if err != nil {
log.Error("failed to get L2 sent message processed height", "err", err)
return 0, err
}
maxHeight := crossHeight
if maxHeight < relayedHeight {
maxHeight = relayedHeight
}
if maxHeight < l2SentHeight {
maxHeight = l2SentHeight
}
return maxHeight, nil
}
// L1FetchAndSaveEvents fetches and saves events on L1
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, addrList []common.Address) error {
l1CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: addrList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1RelayedMessageEventSignature
query.Topics[0][3] = backendabi.L1SentMessageEventSignature
query.Topics[0][4] = backendabi.L1DepositERC721Sig
query.Topics[0][5] = backendabi.L1DepositERC1155Sig
query.Topics[0][6] = backendabi.L1DepositWETHSig
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get l1 event logs", "err", err)
return err
}
depositL1CrossMsgs, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
if err != nil {
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
err = db.Transaction(func(tx *gorm.DB) error {
if txErr := l1CrossMsgOrm.InsertL1CrossMsg(ctx, depositL1CrossMsgs, tx); txErr != nil {
log.Error("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", txErr)
return txErr
}
if txErr := relayedOrm.InsertRelayedMsg(ctx, relayedMsg, tx); txErr != nil {
log.Error("l1FetchAndSaveEvents: Failed to insert relayed msg event logs", "err", txErr)
return txErr
}
return nil
})
if err != nil {
log.Crit("l2FetchAndSaveEvents: Failed to finish transaction", "err", err)
}
return err
}
// L2FetchAndSaveEvents fetches and saves events on L2
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, addrList []common.Address) error {
l2CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
l2SentMsgOrm := orm.NewL2SentMsg(db)
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: addrList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2RelayedMessageEventSignature
query.Topics[0][3] = backendabi.L2SentMessageEventSignature
query.Topics[0][4] = backendabi.L2WithdrawERC721Sig
query.Topics[0][5] = backendabi.L2WithdrawERC1155Sig
query.Topics[0][6] = backendabi.L2WithdrawWETHSig
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get l2 event logs", "err", err)
return err
}
depositL2CrossMsgs, relayedMsg, l2SentMsgs, err := utils.ParseBackendL2EventLogs(logs)
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
err = db.Transaction(func(tx *gorm.DB) error {
if txErr := l2CrossMsgOrm.InsertL2CrossMsg(ctx, depositL2CrossMsgs, tx); txErr != nil {
log.Error("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", txErr)
return txErr
}
if txErr := relayedOrm.InsertRelayedMsg(ctx, relayedMsg, tx); txErr != nil {
log.Error("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", txErr)
return txErr
}
if txErr := l2SentMsgOrm.InsertL2SentMsg(ctx, l2SentMsgs, tx); txErr != nil {
log.Error("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", txErr)
return txErr
}
return nil
})
if err != nil {
log.Crit("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
}
return err
}
// FetchAndSaveBatchIndex fetches and saves the batch index
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, scrollChainAddr common.Address) error {
rollupBatchOrm := orm.NewRollupBatch(db)
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{scrollChainAddr},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = backendabi.L1CommitBatchEventSignature
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get batch commit event logs", "err", err)
return err
}
rollupBatches, err := utils.ParseBatchInfoFromScrollChain(ctx, client, logs)
if err != nil {
log.Error("FetchAndSaveBatchIndex: Failed to parse batch commit msg event logs", "err", err)
return err
}
if txErr := rollupBatchOrm.InsertRollupBatch(ctx, rollupBatches); txErr != nil {
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", txErr)
return txErr
}
return nil
}
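Taken together, the functions in this file are meant to be packaged as a FetchEventWorker and handed to the MsgFetcher above. A hedged sketch of that wiring for L1 follows; the actual construction site (the cmd entrypoint) is not part of this excerpt, and any LayerConfig fields beyond Confirmation and StartHeight are taken as given.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"gorm.io/gorm"

	"bridge-history-api/config"
	"bridge-history-api/crossmsg"
)

// newL1MsgFetcher shows one possible wiring of the L1 worker functions defined above.
func newL1MsgFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client, contracts []common.Address) (*crossmsg.MsgFetcher, error) {
	l1Worker := &crossmsg.FetchEventWorker{
		F:    crossmsg.L1FetchAndSaveEvents,       // fetch and persist L1 deposit/relayed events
		G:    crossmsg.GetLatestL1ProcessedHeight, // resume height from cross_message/relayed_msg
		Name: "L1 cross message fetcher",
	}
	// L1ReorgHandling soft-deletes rows above the reorged height inside one transaction.
	return crossmsg.NewMsgFetcher(ctx, cfg, db, client, l1Worker, contracts, crossmsg.L1ReorgHandling)
}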

View File

@@ -1,33 +1,38 @@
package message_proof package messageproof
import ( import (
"context" "context"
"database/sql"
"errors"
"fmt" "fmt"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
"bridge-history-api/db" "bridge-history-api/orm"
"bridge-history-api/db/orm"
) )
// MsgProofUpdater is used to update message proofs in the db
type MsgProofUpdater struct { type MsgProofUpdater struct {
ctx context.Context ctx context.Context
db db.OrmFactory db *gorm.DB
l2SentMsgOrm *orm.L2SentMsg
rollupOrm *orm.RollupBatch
withdrawTrie *WithdrawTrie withdrawTrie *WithdrawTrie
} }
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db db.OrmFactory) *MsgProofUpdater { // NewMsgProofUpdater creates a new MsgProofUpdater instance
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db *gorm.DB) *MsgProofUpdater {
return &MsgProofUpdater{ return &MsgProofUpdater{
ctx: ctx, ctx: ctx,
db: db, db: db,
l2SentMsgOrm: orm.NewL2SentMsg(db),
rollupOrm: orm.NewRollupBatch(db),
withdrawTrie: NewWithdrawTrie(), withdrawTrie: NewWithdrawTrie(),
} }
} }
// Start the MsgProofUpdater
func (m *MsgProofUpdater) Start() { func (m *MsgProofUpdater) Start() {
log.Info("MsgProofUpdater Start") log.Info("MsgProofUpdater Start")
m.initialize(m.ctx) m.initialize(m.ctx)
@@ -39,7 +44,7 @@ func (m *MsgProofUpdater) Start() {
tick.Stop() tick.Stop()
return return
case <-tick.C: case <-tick.C:
latestBatch, err := m.db.GetLatestRollupBatch() latestBatch, err := m.rollupOrm.GetLatestRollupBatch(m.ctx)
if err != nil { if err != nil {
log.Warn("MsgProofUpdater: Can not get latest RollupBatch: ", "err", err) log.Warn("MsgProofUpdater: Can not get latest RollupBatch: ", "err", err)
continue continue
@@ -47,7 +52,7 @@ func (m *MsgProofUpdater) Start() {
if latestBatch == nil { if latestBatch == nil {
continue continue
} }
latestBatchIndexWithProof, err := m.db.GetLatestL2SentMsgBatchIndex() latestBatchIndexWithProof, err := m.l2SentMsgOrm.GetLatestL2SentMsgBatchIndex(m.ctx)
if err != nil { if err != nil {
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err) log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
continue continue
@@ -59,7 +64,7 @@ func (m *MsgProofUpdater) Start() {
start = uint64(latestBatchIndexWithProof) + 1 start = uint64(latestBatchIndexWithProof) + 1
} }
for i := start; i <= latestBatch.BatchIndex; i++ { for i := start; i <= latestBatch.BatchIndex; i++ {
batch, err := m.db.GetRollupBatchByIndex(i) batch, err := m.rollupOrm.GetRollupBatchByIndex(m.ctx, i)
if err != nil { if err != nil {
log.Error("MsgProofUpdater: Can not get RollupBatch: ", "err", err, "index", i) log.Error("MsgProofUpdater: Can not get RollupBatch: ", "err", err, "index", i)
break break
@@ -70,8 +75,16 @@ func (m *MsgProofUpdater) Start() {
log.Error("MsgProofUpdater: can not append l2messages", "startBlockNumber", batch.StartBlockNumber, "endBlockNumber", batch.EndBlockNumber, "err", err) log.Error("MsgProofUpdater: can not append l2messages", "startBlockNumber", batch.StartBlockNumber, "endBlockNumber", batch.EndBlockNumber, "err", err)
break break
} }
// update the batch's withdraw root here
err = m.rollupOrm.UpdateRollupBatchWithdrawRoot(m.ctx, batch.BatchIndex, m.withdrawTrie.MessageRoot().Hex())
if err != nil {
// if this fails, it is better to restart the binary
log.Error("MsgProofUpdater: can not update batch withdraw root", "err", err)
break
}
err = m.updateMsgProof(msgs, proofs, batch.BatchIndex) err = m.updateMsgProof(msgs, proofs, batch.BatchIndex)
if err != nil { if err != nil {
// if this fails, it is better to restart the binary
log.Error("MsgProofUpdater: can not update msg proof", "err", err) log.Error("MsgProofUpdater: can not update msg proof", "err", err)
break break
} }
@@ -83,6 +96,7 @@ func (m *MsgProofUpdater) Start() {
} }
// Stop the MsgProofUpdater
func (m *MsgProofUpdater) Stop() { func (m *MsgProofUpdater) Stop() {
log.Info("MsgProofUpdater Stop") log.Info("MsgProofUpdater Stop")
} }
@@ -107,19 +121,19 @@ func (m *MsgProofUpdater) initialize(ctx context.Context) {
func (m *MsgProofUpdater) initializeWithdrawTrie() error { func (m *MsgProofUpdater) initializeWithdrawTrie() error {
var batch *orm.RollupBatch var batch *orm.RollupBatch
firstMsg, err := m.db.GetL2SentMessageByNonce(0) firstMsg, err := m.l2SentMsgOrm.GetL2SentMessageByNonce(m.ctx, 0)
if err != nil && !errors.Is(err, sql.ErrNoRows) { if err != nil {
return fmt.Errorf("failed to get first l2 message: %v", err) return fmt.Errorf("failed to get first l2 message: %v", err)
} }
// no l2 message // no l2 message
// TO DO: check if we realy dont have l2 sent message with nonce 0 // TODO: check if we really don't have an l2 sent message with nonce 0
if firstMsg == nil { if firstMsg == nil {
log.Info("No first l2sentmsg in db") log.Info("No first l2sentmsg in db")
return nil return nil
} }
// if no batch, return and wait for next try round // if no batch, return and wait for next try round
batch, err = m.db.GetLatestRollupBatch() batch, err = m.rollupOrm.GetLatestRollupBatch(m.ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to get latest batch: %v", err) return fmt.Errorf("failed to get latest batch: %v", err)
} }
@@ -131,7 +145,7 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
batchIndex := batch.BatchIndex batchIndex := batch.BatchIndex
for { for {
var msg *orm.L2SentMsg var msg *orm.L2SentMsg
msg, err = m.db.GetLatestL2SentMsgLEHeight(batch.EndBlockNumber) msg, err = m.l2SentMsgOrm.GetLatestL2SentMsgLEHeight(m.ctx, batch.EndBlockNumber)
if err != nil { if err != nil {
log.Warn("failed to get l2 sent message less than height", "endBlocknum", batch.EndBlockNumber, "err", err) log.Warn("failed to get l2 sent message less than height", "endBlocknum", batch.EndBlockNumber, "err", err)
} }
@@ -155,8 +169,8 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
// iterate for next batch // iterate for next batch
batchIndex-- batchIndex--
batch, err = m.db.GetRollupBatchByIndex(batchIndex) batch, err = m.rollupOrm.GetRollupBatchByIndex(m.ctx, batchIndex)
if err != nil { if err != nil || batch == nil {
return fmt.Errorf("failed to get block batch %v: %v", batchIndex, err) return fmt.Errorf("failed to get block batch %v: %v", batchIndex, err)
} }
} }
@@ -168,7 +182,10 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
if err != nil { if err != nil {
return err return err
} }
err = m.rollupOrm.UpdateRollupBatchWithdrawRoot(m.ctx, b.BatchIndex, m.withdrawTrie.MessageRoot().Hex())
if err != nil {
return err
}
err = m.updateMsgProof(msgs, proofs, b.BatchIndex) err = m.updateMsgProof(msgs, proofs, b.BatchIndex)
if err != nil { if err != nil {
return err return err
@@ -183,40 +200,30 @@ func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte,
if len(msgs) == 0 { if len(msgs) == 0 {
return nil return nil
} }
// this should not happend, but double checked // this should not happen, but double-check anyway
if len(msgs) != len(proofs) { if len(msgs) != len(proofs) {
return fmt.Errorf("illegal state: len(msgs) != len(proofs)") return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
} }
dbTx, err := m.db.Beginx() err := m.db.Transaction(func(tx *gorm.DB) error {
for i, msg := range msgs {
proofHex := common.Bytes2Hex(proofs[i])
log.Debug("updateMsgProof", "msgHash", msg.MsgHash, "batchIndex", batchIndex, "proof", proofHex)
if err := m.l2SentMsgOrm.UpdateL2MessageProof(m.ctx, msg.MsgHash, proofHex, batchIndex, tx); err != nil {
return err
}
}
return nil
})
if err != nil { if err != nil {
return err return err
} }
for i, msg := range msgs {
proofHex := common.Bytes2Hex(proofs[i])
log.Debug("updateMsgProof", "msgHash", msg.MsgHash, "batchIndex", batchIndex, "proof", proofHex)
if dbTxErr := m.db.UpdateL2MessageProofInDBTx(m.ctx, dbTx, msg.MsgHash, proofHex, batchIndex); dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
return dbTxErr
}
}
if dbTxErr := dbTx.Commit(); dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
return dbTxErr
}
return nil return nil
} }
// appendL2Messages will append all messages between firstBlock and lastBlock (both inclusive) to withdrawTrie and compute corresponding merkle proof of each message. // appendL2Messages will append all messages between firstBlock and lastBlock (both inclusive) to withdrawTrie and compute corresponding merkle proof of each message.
func (m *MsgProofUpdater) appendL2Messages(firstBlock, lastBlock uint64) ([]*orm.L2SentMsg, [][]byte, error) { func (m *MsgProofUpdater) appendL2Messages(firstBlock, lastBlock uint64) ([]*orm.L2SentMsg, [][]byte, error) {
var msgProofs [][]byte var msgProofs [][]byte
messages, err := m.db.GetL2SentMsgMsgHashByHeightRange(firstBlock, lastBlock) messages, err := m.l2SentMsgOrm.GetL2SentMsgMsgHashByHeightRange(m.ctx, firstBlock, lastBlock)
if err != nil { if err != nil {
log.Error("GetL2SentMsgMsgHashByHeightRange failed", "error", err, "firstBlock", firstBlock, "lastBlock", lastBlock) log.Error("GetL2SentMsgMsgHashByHeightRange failed", "error", err, "firstBlock", firstBlock, "lastBlock", lastBlock)
return messages, msgProofs, err return messages, msgProofs, err

View File

@@ -1,4 +1,4 @@
package message_proof package messageproof
import ( import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"

View File

@@ -1,4 +1,4 @@
package message_proof package messageproof
import ( import (
"math/big" "math/big"

View File

@@ -1,4 +1,4 @@
package cross_msg_test package crossmsg_test
import ( import (
"crypto/rand" "crypto/rand"
@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"bridge-history-api/cross_msg" "bridge-history-api/crossmsg"
) )
func TestMergeIntoList(t *testing.T) { func TestMergeIntoList(t *testing.T) {
@@ -18,7 +18,7 @@ func TestMergeIntoList(t *testing.T) {
assert.Equal(t, headers[0].Hash(), headers[1].ParentHash) assert.Equal(t, headers[0].Hash(), headers[1].ParentHash)
headers2, err := generateHeaders(18) headers2, err := generateHeaders(18)
assert.NoError(t, err) assert.NoError(t, err)
result := cross_msg.MergeAddIntoHeaderList(headers, headers2, 64) result := crossmsg.MergeAddIntoHeaderList(headers, headers2, 64)
assert.Equal(t, 64, len(result)) assert.Equal(t, 64, len(result))
assert.Equal(t, headers2[len(headers2)-1], result[len(result)-1]) assert.Equal(t, headers2[len(headers2)-1], result[len(result)-1])
assert.NotEqual(t, headers[0], result[0]) assert.NotEqual(t, headers[0], result[0])
@@ -53,7 +53,7 @@ func generateHeaders(amount int) ([]*types.Header, error) {
Time: uint64(i * 15), Time: uint64(i * 15),
Extra: []byte{}, Extra: []byte{},
MixDigest: common.Hash{}, MixDigest: common.Hash{},
Nonce: types.EncodeNonce(uint64(nonce.Uint64())), Nonce: types.EncodeNonce(nonce.Uint64()),
} }
headers[i] = header headers[i] = header
} }

View File

@@ -0,0 +1,108 @@
package crossmsg
import (
"context"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
"bridge-history-api/orm"
)
// ReorgHandling handles reorg function type
type ReorgHandling func(ctx context.Context, reorgHeight uint64, db *gorm.DB) error
func reverseArray(arr []*types.Header) []*types.Header {
for i := 0; i < len(arr)/2; i++ {
j := len(arr) - i - 1
arr[i], arr[j] = arr[j], arr[i]
}
return arr
}
// IsParentAndChild checks whether the child header's ParentHash matches the parent header's Hash
func IsParentAndChild(parentHeader *types.Header, header *types.Header) bool {
return header.ParentHash == parentHeader.Hash()
}
// MergeAddIntoHeaderList merges two header lists; if the result exceeds maxLength, the oldest entries are dropped
func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []*types.Header {
mergedArr := append(baseArr, extraArr...)
if len(mergedArr) <= maxLength {
return mergedArr
}
startIndex := len(mergedArr) - maxLength
return mergedArr[startIndex:]
}
// BackwardFindReorgBlock finds the reorg block by backward search
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, lastHeader *types.Header) (int, bool, []*types.Header) {
maxStep := len(headers)
backwardHeaderList := []*types.Header{lastHeader}
for iterRound := 0; iterRound < maxStep; iterRound++ {
header, err := client.HeaderByHash(ctx, lastHeader.ParentHash)
if err != nil {
log.Error("BackwardFindReorgBlock failed", "error", err)
return -1, false, nil
}
backwardHeaderList = append(backwardHeaderList, header)
for j := len(headers) - 1; j >= 0; j-- {
if IsParentAndChild(headers[j], header) {
backwardHeaderList = reverseArray(backwardHeaderList)
return j, true, backwardHeaderList
}
}
lastHeader = header
}
return -1, false, nil
}
// L1ReorgHandling handles l1 reorg
func L1ReorgHandling(ctx context.Context, reorgHeight uint64, db *gorm.DB) error {
l1CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
err := db.Transaction(func(tx *gorm.DB) error {
if err := l1CrossMsgOrm.DeleteL1CrossMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l1 cross msg from height", "height", reorgHeight, "err", err)
return err
}
if err := relayedOrm.DeleteL1RelayedHashAfterHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l1 relayed msg from height", "height", reorgHeight, "err", err)
return err
}
return nil
})
if err != nil {
log.Crit("l1 reorg handling failed", "err", err)
}
return err
}
// L2ReorgHandling handles l2 reorg
func L2ReorgHandling(ctx context.Context, reorgHeight uint64, db *gorm.DB) error {
l2CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
l2SentMsgOrm := orm.NewL2SentMsg(db)
err := db.Transaction(func(tx *gorm.DB) error {
if err := l2CrossMsgOrm.DeleteL2CrossMsgFromHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l2 cross msg from height", "height", reorgHeight, "err", err)
return err
}
if err := relayedOrm.DeleteL2RelayedHashAfterHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l2 relayed msg from height", "height", reorgHeight, "err", err)
return err
}
if err := l2SentMsgOrm.DeleteL2SentMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l2 sent msg from height", "height", reorgHeight, "err", err)
return err
}
return nil
})
if err != nil {
log.Crit("l2 reorg handling failed", "err", err)
}
return err
}
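As a rough reading aid, the following sketch approximates how MsgFetcher's fetchMissingLatestHeaders uses these helpers when a new header fails to extend the cached chain. The wrapper function, the cache size of 64, and merging the re-fetched headers back into the cache are assumptions for illustration; the authoritative flow is the diff above.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
	"gorm.io/gorm"

	"bridge-history-api/crossmsg"
	"bridge-history-api/utils"
)

// reconcileL2Headers returns the updated header cache after inspecting the latest header.
func reconcileL2Headers(ctx context.Context, db *gorm.DB, client *ethclient.Client, cached []*types.Header, latest *types.Header, confirmation uint64) ([]*types.Header, error) {
	if len(cached) == 0 || crossmsg.IsParentAndChild(cached[len(cached)-1], latest) {
		// no reorg: append and keep at most 64 headers (assumed cache size)
		return crossmsg.MergeAddIntoHeaderList(cached, []*types.Header{latest}, 64), nil
	}
	index, ok, valid := crossmsg.BackwardFindReorgBlock(ctx, cached, client, latest)
	if !ok {
		// reorg deeper than the cache: fall back to a safe height and drop everything above it
		safe, err := utils.GetSafeBlockNumber(ctx, client, confirmation)
		if err != nil {
			return nil, err
		}
		return nil, crossmsg.L2ReorgHandling(ctx, safe, db)
	}
	// soft-delete rows above the last common ancestor, then keep the valid prefix plus re-fetched headers
	if err := crossmsg.L2ReorgHandling(ctx, cached[index].Number.Uint64(), db); err != nil {
		return nil, err
	}
	return crossmsg.MergeAddIntoHeaderList(cached[:index+1], valid, 64), nil
}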

View File

@@ -1,70 +0,0 @@
package orm
import (
"database/sql"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type rollupBatchOrm struct {
db *sqlx.DB
}
type RollupBatch struct {
ID uint64 `json:"id" db:"id"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
BatchHash string `json:"batch_hash" db:"batch_hash"`
CommitHeight uint64 `json:"commit_height" db:"commit_height"`
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
}
// NewRollupBatchOrm create an NewRollupBatchOrm instance
func NewRollupBatchOrm(db *sqlx.DB) RollupBatchOrm {
return &rollupBatchOrm{db: db}
}
func (b *rollupBatchOrm) BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, batches []*RollupBatch) error {
if len(batches) == 0 {
return nil
}
var err error
batchMaps := make([]map[string]interface{}, len(batches))
for i, batch := range batches {
batchMaps[i] = map[string]interface{}{
"commit_height": batch.CommitHeight,
"batch_index": batch.BatchIndex,
"batch_hash": batch.BatchHash,
"start_block_number": batch.StartBlockNumber,
"end_block_number": batch.EndBlockNumber,
}
}
_, err = dbTx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number) values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
if err != nil {
log.Error("BatchInsertRollupBatchDBTx: failed to insert batch event msgs", "err", err)
return err
}
return nil
}
func (b *rollupBatchOrm) GetLatestRollupBatch() (*RollupBatch, error) {
result := &RollupBatch{}
row := b.db.QueryRowx(`SELECT id, batch_index, commit_height, batch_hash, start_block_number, end_block_number FROM rollup_batch ORDER BY batch_index DESC LIMIT 1;`)
if err := row.StructScan(result); err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err
}
return result, nil
}
func (b *rollupBatchOrm) GetRollupBatchByIndex(index uint64) (*RollupBatch, error) {
result := &RollupBatch{}
row := b.db.QueryRowx(`SELECT id, batch_index, batch_hash, commit_height, start_block_number, end_block_number FROM rollup_batch WHERE batch_index = $1;`, index)
if err := row.StructScan(result); err != nil {
return nil, err
}
return result, nil
}

View File

@@ -1,116 +0,0 @@
package orm
import (
"context"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/jmoiron/sqlx"
)
type AssetType int
type MsgType int
func (a AssetType) String() string {
switch a {
case ETH:
return "ETH"
case ERC20:
return "ERC20"
case ERC1155:
return "ERC1155"
case ERC721:
return "ERC721"
}
return "Unknown Asset Type"
}
const (
ETH AssetType = iota
ERC20
ERC721
ERC1155
)
const (
UnknownMsg MsgType = iota
Layer1Msg
Layer2Msg
)
// CrossMsg represents a cross message from layer 1 to layer 2
type CrossMsg struct {
ID uint64 `json:"id" db:"id"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Amount string `json:"amount" db:"amount"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Token string `json:"layer1_token" db:"layer1_token"`
Layer2Token string `json:"layer2_token" db:"layer2_token"`
TokenIDs []string `json:"token_ids" db:"token_ids"`
TokenAmounts []string `json:"token_amounts" db:"token_amounts"`
Asset int `json:"asset" db:"asset"`
MsgType int `json:"msg_type" db:"msg_type"`
Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
// L1CrossMsgOrm provides operations on l1_cross_message table
type L1CrossMsgOrm interface {
GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error)
GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error)
BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
// UpdateL1CrossMsgHashDBTx invoked when SentMessage event is received
UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error
UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error
GetLatestL1ProcessedHeight() (int64, error)
DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error
GetL1EarliestNoBlockTimestampHeight() (uint64, error)
}
// L2CrossMsgOrm provides operations on cross_message table
type L2CrossMsgOrm interface {
GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error)
GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error)
BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
// UpdateL2CrossMsgHashDBTx invoked when SentMessage event is received
UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error
UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error
GetLatestL2ProcessedHeight() (int64, error)
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlockTimestampHeight() (uint64, error)
}
type RelayedMsgOrm interface {
BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error
GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error)
GetLatestRelayedHeightOnL1() (int64, error)
GetLatestRelayedHeightOnL2() (int64, error)
DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}
type L2SentMsgOrm interface {
BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error
GetL2SentMsgByHash(l2Hash string) (*L2SentMsg, error)
GetLatestSentMsgHeightOnL2() (int64, error)
GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)
GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error)
GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error)
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error
GetLatestL2SentMsgBatchIndex() (int64, error)
DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}
type RollupBatchOrm interface {
GetLatestRollupBatch() (*RollupBatch, error)
GetRollupBatchByIndex(index uint64) (*RollupBatch, error)
BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, messages []*RollupBatch) error
}

View File

@@ -1,140 +0,0 @@
package orm
import (
"context"
"database/sql"
"errors"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type l1CrossMsgOrm struct {
db *sqlx.DB
}
// NewL1CrossMsgOrm create an NewL1CrossMsgOrm instance
func NewL1CrossMsgOrm(db *sqlx.DB) L1CrossMsgOrm {
return &l1CrossMsgOrm{db: db}
}
func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND deleted_at IS NULL;`, l1Hash.String(), Layer1Msg)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
}
return nil, err
}
return result, nil
}
// GetL1CrossMsgsByAddress returns all layer1 cross messages under given address
// Warning: return empty slice if no data found
func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND deleted_at IS NULL;`, sender.String(), Layer1Msg)
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
if len(results) == 0 && errors.Is(err, sql.ErrNoRows) {
} else if err != nil {
return nil, err
}
return results, nil
}
func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"amount": msg.Amount,
"asset": msg.Asset,
"layer1_hash": msg.Layer1Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
"token_ids": msg.TokenIDs,
"msg_type": Layer1Msg,
}
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
return err
}
return nil
}
// UpdateL1CrossMsgHashDBTx update l1 cross msg hash in db, no need to check msg_type since layer1_hash wont be empty if its layer1 msg
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
return err
}
return nil
}
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
return err
}
return nil
}
func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer1Msg)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
if _, err := l.db.Exec(`UPDATE cross_message SET deleted_at = current_timestamp WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
return err
}
return nil
}
func (l *l1CrossMsgOrm) UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer1Msg); err != nil {
return err
}
return nil
}
func (l *l1CrossMsgOrm) GetL1EarliestNoBlockTimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer1Msg)
var result uint64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows {
return 0, nil
}
return 0, err
}
return result, nil
}

View File

@@ -1,142 +0,0 @@
package orm
import (
"context"
"database/sql"
"errors"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type l2CrossMsgOrm struct {
db *sqlx.DB
}
// NewL2CrossMsgOrm create an NewL2CrossMsgOrm instance
func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {
return &l2CrossMsgOrm{db: db}
}
func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND deleted_at IS NULL;`, l2Hash.String())
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
}
return nil, err
}
return result, nil
}
// GetL2CrossMsgByAddress returns all layer2 cross messages under given address
// Warning: return empty slice if no data found
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg)
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
if len(results) == 0 && errors.Is(err, sql.ErrNoRows) {
// log.Warn("no unprocessed layer1 messages in db", "err", err)
} else if err != nil {
return nil, err
}
return results, nil
}
func (l *l2CrossMsgOrm) DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE cross_message SET deleted_at = current_timestamp where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
if err != nil {
log.Error("DeleteL1CrossMsgAfterHeightDBTx: failed to delete", "height", height, "err", err)
return err
}
return nil
}
func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"asset": msg.Asset,
"layer2_hash": msg.Layer2Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
"token_ids": msg.TokenIDs,
"amount": msg.Amount,
"msg_type": Layer2Msg,
}
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
return err
}
return nil
}
func (l *l2CrossMsgOrm) UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
return err
}
return nil
}
func (l *l2CrossMsgOrm) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
return err
}
return nil
}
func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer2Msg)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *l2CrossMsgOrm) UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer2Msg); err != nil {
return err
}
return nil
}
func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer2Msg)
var result uint64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows {
return 0, nil
}
return 0, err
}
return result, nil
}

View File

@@ -1,151 +0,0 @@
package orm
import (
"context"
"database/sql"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type L2SentMsg struct {
ID uint64 `json:"id" db:"id"`
TxSender string `json:"tx_sender" db:"tx_sender"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Value string `json:"value" db:"value"`
Height uint64 `json:"height" db:"height"`
Nonce uint64 `json:"nonce" db:"nonce"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
MsgProof string `json:"msg_proof" db:"msg_proof"`
MsgData string `json:"msg_data" db:"msg_data"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
type l2SentMsgOrm struct {
db *sqlx.DB
}
// NewL2SentMsgOrm create an NewRollupBatchOrm instance
func NewL2SentMsgOrm(db *sqlx.DB) L2SentMsgOrm {
return &l2SentMsgOrm{db: db}
}
func (l *l2SentMsgOrm) GetL2SentMsgByHash(msgHash string) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
if err := row.StructScan(result); err != nil {
return nil, err
}
return result, nil
}
func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"tx_sender": msg.TxSender,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"nonce": msg.Nonce,
"batch_index": msg.BatchIndex,
"msg_proof": msg.MsgProof,
"msg_data": msg.MsgData,
}
}
_, err = dbTx.NamedExec(`insert into l2_sent_msg(tx_sender, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:tx_sender, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
return err
}
return err
}
func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE deleted_at IS NULL ORDER BY nonce DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batch_index, msgHash); err != nil {
return err
}
return nil
}
func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE msg_proof != '' AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return -1, err
}
if result.Valid {
return result.Int64, nil
}
return -1, nil
}
func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND deleted_at IS NULL ORDER BY nonce ASC;`, startHeight, endHeight)
if err != nil {
return nil, err
}
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
return results, err
}
func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND deleted_at IS NULL;`, nonce)
err := row.StructScan(result)
if err != nil {
return nil, err
}
return result, nil
}
func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND deleted_at IS NULL order by nonce desc limit 1`, endBlockNumber)
err := row.StructScan(result)
if err != nil {
return nil, err
}
return result, nil
}
func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
return err
}

View File

@@ -1,99 +0,0 @@
package orm
import (
"database/sql"
"errors"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type RelayedMsg struct {
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
}
type relayedMsgOrm struct {
db *sqlx.DB
}
// NewRelayedMsgOrm create an NewRelayedMsgOrm instance
func NewRelayedMsgOrm(db *sqlx.DB) RelayedMsgOrm {
return &relayedMsgOrm{db: db}
}
func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"msg_hash": msg.MsgHash,
"height": msg.Height,
"layer1_hash": msg.Layer1Hash,
"layer2_hash": msg.Layer2Hash,
}
}
_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps)
if err != nil {
log.Error("BatchInsertRelayedMsgDBTx: failed to insert relayed msgs", "err", err)
return err
}
return nil
}
func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error) {
result := &RelayedMsg{}
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msg_hash)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
}
return nil, err
}
return result, nil
}
func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *relayedMsgOrm) DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer1_hash != '';`, height)
return err
}
func (l *relayedMsgOrm) DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer2_hash != '';`, height)
return err
}

View File

@@ -1,97 +0,0 @@
package db
import (
"database/sql"
"errors"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"bridge-history-api/config"
"bridge-history-api/db/orm"
)
// OrmFactory include all ormFactory interface
type OrmFactory interface {
orm.L1CrossMsgOrm
orm.L2CrossMsgOrm
orm.RelayedMsgOrm
orm.L2SentMsgOrm
orm.RollupBatchOrm
GetTotalCrossMsgCountByAddress(sender string) (uint64, error)
GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error)
GetDB() *sqlx.DB
Beginx() (*sqlx.Tx, error)
Close() error
}
type ormFactory struct {
orm.L1CrossMsgOrm
orm.L2CrossMsgOrm
orm.RelayedMsgOrm
orm.L2SentMsgOrm
orm.RollupBatchOrm
*sqlx.DB
}
// NewOrmFactory create an ormFactory factory include all ormFactory interface
func NewOrmFactory(cfg *config.Config) (OrmFactory, error) {
// Initialize sql/sqlx
db, err := sqlx.Open(cfg.DB.DriverName, cfg.DB.DSN)
if err != nil {
return nil, err
}
db.SetMaxOpenConns(cfg.DB.MaxOpenNum)
db.SetMaxIdleConns(cfg.DB.MaxIdleNum)
if err = db.Ping(); err != nil {
return nil, err
}
return &ormFactory{
L1CrossMsgOrm: orm.NewL1CrossMsgOrm(db),
L2CrossMsgOrm: orm.NewL2CrossMsgOrm(db),
RelayedMsgOrm: orm.NewRelayedMsgOrm(db),
L2SentMsgOrm: orm.NewL2SentMsgOrm(db),
RollupBatchOrm: orm.NewRollupBatchOrm(db),
DB: db,
}, nil
}
func (o *ormFactory) GetDB() *sqlx.DB {
return o.DB
}
func (o *ormFactory) Beginx() (*sqlx.Tx, error) {
return o.DB.Beginx()
}
func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, error) {
var count uint64
row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND deleted_at IS NULL;`, sender)
if err := row.Scan(&count); err != nil {
return 0, err
}
return count, nil
}
func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
var results []*orm.CrossMsg
rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND deleted_at IS NULL ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, sender, limit, offset)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return results, nil
}
return nil, err
}
defer rows.Close()
for rows.Next() {
msg := &orm.CrossMsg{}
if err = rows.StructScan(msg); err != nil {
return nil, err
}
results = append(results, msg)
}
return results, rows.Err()
}
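Again outside the compare: a short usage sketch for the factory above, paging through one sender's cross messages the way a request handler might. The page/pageSize convention and the listCrossMsgs wrapper are illustrative assumptions; only the factory methods themselves come from the interface shown above.
// listCrossMsgs assumes a factory built once at startup via db.NewOrmFactory(cfg)
// and closed with factory.Close() on shutdown. page is 1-based.
func listCrossMsgs(factory OrmFactory, sender string, page, pageSize int64) ([]*orm.CrossMsg, uint64, error) {
	total, err := factory.GetTotalCrossMsgCountByAddress(sender)
	if err != nil {
		return nil, 0, err
	}
	// GetCrossMsgsByAddressWithOffset maps directly onto LIMIT/OFFSET.
	msgs, err := factory.GetCrossMsgsByAddressWithOffset(sender, (page-1)*pageSize, pageSize)
	if err != nil {
		return nil, 0, err
	}
	return msgs, total, nil
}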


@@ -4,34 +4,28 @@ go 1.19
require (
github.com/ethereum/go-ethereum v1.12.0
- github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
- github.com/jmoiron/sqlx v1.3.5
- github.com/kataras/iris/v12 v12.2.0
- github.com/lib/pq v1.10.7
+ github.com/gin-contrib/cors v1.4.0
+ github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-colorable v0.1.13
- github.com/mattn/go-isatty v0.0.18
+ github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
github.com/pressly/goose/v3 v3.7.0
- github.com/stretchr/testify v1.8.2
- github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
+ github.com/stretchr/testify v1.8.3
+ github.com/urfave/cli/v2 v2.25.7
+ gorm.io/driver/postgres v1.5.0
+ gorm.io/gorm v1.25.2
)
require (
- github.com/BurntSushi/toml v1.2.1 // indirect
- github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect
- github.com/CloudyKit/jet/v6 v6.2.0 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
- github.com/Joker/jade v1.1.3 // indirect
- github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
- github.com/andybalholm/brotli v1.0.5 // indirect
- github.com/aymerick/douceur v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.7.0 // indirect
- github.com/blang/semver/v4 v4.0.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
+ github.com/bytedance/sonic v1.9.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/cockroachdb/errors v1.9.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
@@ -44,28 +38,26 @@ require (
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
- github.com/docker/docker v20.10.21+incompatible // indirect
- github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
+ github.com/docker/docker v23.0.6+incompatible // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
- github.com/fatih/structs v1.1.0 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
- github.com/flosch/pongo2/v4 v4.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
+ github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-sql-driver/mysql v1.7.0 // indirect
+ github.com/go-playground/locales v0.14.1 // indirect
+ github.com/go-playground/universal-translator v0.18.1 // indirect
+ github.com/go-playground/validator/v10 v10.14.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
- github.com/gobwas/httphead v0.1.0 // indirect
- github.com/gobwas/pool v0.2.1 // indirect
- github.com/gobwas/ws v1.1.0 // indirect
+ github.com/goccy/go-json v0.10.2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
- github.com/gorilla/css v1.0.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
@@ -75,36 +67,29 @@ require (
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
- github.com/iris-contrib/go.uuid v2.0.0+incompatible // indirect
- github.com/iris-contrib/schema v0.0.6 // indirect
+ github.com/jackc/pgpassfile v1.0.0 // indirect
+ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+ github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
- github.com/josharian/intern v1.0.0 // indirect
- github.com/kataras/blocks v0.0.7 // indirect
- github.com/kataras/golog v0.1.8 // indirect
- github.com/kataras/neffos v0.0.21 // indirect
- github.com/kataras/pio v0.0.11 // indirect
- github.com/kataras/sitemap v0.0.6 // indirect
- github.com/kataras/tunnel v0.0.4 // indirect
+ github.com/jinzhu/inflection v1.0.0 // indirect
+ github.com/jinzhu/now v1.1.5 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.0 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
- github.com/mailgun/raymond/v2 v2.0.48 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
- github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
- github.com/mediocregopher/radix/v3 v3.8.1 // indirect
- github.com/microcosm-cc/bluemonday v1.0.23 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
- github.com/nats-io/nats.go v1.23.0 // indirect
- github.com/nats-io/nkeys v0.3.0 // indirect
- github.com/nats-io/nuid v1.0.1 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.27.1 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
+ github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -116,35 +101,27 @@ require (
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/schollz/closestmatch v2.1.0+incompatible // indirect
- github.com/sergi/go-diff v1.2.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
- github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
- github.com/tdewolff/minify/v2 v2.12.4 // indirect
- github.com/tdewolff/parse/v2 v2.6.4 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
+ github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
- github.com/valyala/bytebufferpool v1.0.0 // indirect
- github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
- github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+ github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
- github.com/yosssi/ace v0.0.5 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
- golang.org/x/crypto v0.10.0 // indirect
+ golang.org/x/arch v0.4.0 // indirect
+ golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
- golang.org/x/net v0.10.0 // indirect
+ golang.org/x/net v0.12.0 // indirect
- golang.org/x/sync v0.1.0 // indirect
+ golang.org/x/sync v0.3.0 // indirect
- golang.org/x/sys v0.9.0 // indirect
+ golang.org/x/sys v0.10.0 // indirect
- golang.org/x/text v0.10.0 // indirect
+ golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
- golang.org/x/tools v0.8.0 // indirect
+ golang.org/x/tools v0.11.0 // indirect
- golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
- google.golang.org/protobuf v1.29.0 // indirect
+ google.golang.org/protobuf v1.31.0 // indirect
- gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
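The go.mod changes above swap out the old HTTP and database layers (kataras/iris, jmoiron/sqlx, lib/pq) for gin and gorm. Not from the compare: a minimal bootstrap sketch of the kind of setup those new dependencies support; the DSN, route, and port are placeholder assumptions, not values from the repository.
package main

import (
	"github.com/gin-contrib/cors"
	"github.com/gin-gonic/gin"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

func main() {
	// gorm over pgx replaces the sqlx/lib/pq pool; the DSN here is a placeholder.
	db, err := gorm.Open(postgres.Open("postgres://user:pass@localhost:5432/bridge_history"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	_ = db // handlers would issue their queries through gorm instead of sqlx

	// gin plus gin-contrib/cors replaces iris plus iris-contrib/middleware/cors.
	r := gin.Default()
	r.Use(cors.Default())
	r.GET("/api/txs", func(c *gin.Context) {
		c.JSON(200, gin.H{"address": c.Query("address")})
	})
	_ = r.Run(":8080") // placeholder port
}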


@@ -1,41 +1,25 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
github.com/CloudyKit/jet/v6 v6.2.0 h1:EpcZ6SR9n28BUGtNJSvlBqf90IpjeFr36Tizxhn/oME=
github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Joker/hpp v1.0.0 h1:65+iuJYdRXv/XyN62C1uEmmOx3432rNG/rKlX6V7Kkc=
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.1.3 h1:Qbeh12Vq6BxURXT1qZBRHsDxeURB8ztcL6f3EXSGeHk=
github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 h1:KkH3I3sJuOLP3TjA/dfr4NAY8bghDwnXiU7cTKxQqo0=
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
@@ -47,12 +31,17 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
@@ -96,11 +85,9 @@ github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRk
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE= github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -112,17 +99,15 @@ github.com/ethereum/c-kzg-4844 v0.2.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbp
github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0= github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0=
github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs= github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0Hw=
github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
@@ -132,8 +117,15 @@ github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TB
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
@@ -143,21 +135,26 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/gobwas/ws v1.1.0 h1:7RFti/xnNkMJnrK7D1yQ/iCIB5OrrY/54/H930kIbHA= github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
@@ -199,15 +196,12 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -227,7 +221,6 @@ github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k= github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k=
@@ -238,48 +231,38 @@ github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM= github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
github.com/iris-contrib/go.uuid v2.0.0+incompatible h1:XZubAYg61/JwnJNbZilGjf3b3pB80+OQg2qf6c8BfWE=
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/httpexpect/v2 v2.12.1 h1:3cTZSyBBen/kfjCtgNFoUKi1u0FVXNaAjyRJOo6AVS4=
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458 h1:V60rHQJc6DieKV1BqHIGclraPdO4kinuFAZIrPGHN7s=
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458/go.mod h1:7eVziAp1yUwFB/ZMg71n84VWQH+7wukvxcHuF2e7cbg=
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU=
github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/kataras/blocks v0.0.7 h1:cF3RDY/vxnSRezc7vLFlQFTYXG/yAr1o7WImJuZbzC4=
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
github.com/kataras/golog v0.1.8 h1:isP8th4PJH2SrbkciKnylaND9xoTtfxv++NB+DF0l9g=
github.com/kataras/golog v0.1.8/go.mod h1:rGPAin4hYROfk1qT9wZP6VY2rsb4zzc37QpdPjdkqVw=
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
github.com/kataras/iris/v12 v12.2.0 h1:WzDY5nGuW/LgVaFS5BtTkW3crdSKJ/FEgWnxPnIVVLI=
github.com/kataras/iris/v12 v12.2.0/go.mod h1:BLzBpEunc41GbE68OUaQlqX4jzi791mx5HU04uPb90Y=
github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
github.com/kataras/neffos v0.0.21 h1:UwN/F44jlqdtgFI29y3VhA7IlJ4JbK3UjCbTDg1pYoo=
github.com/kataras/neffos v0.0.21/go.mod h1:FeGka8lu8cjD2H+0OpBvW8c6xXawy3fj5VX6xcIJ1Fg=
github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
github.com/kataras/pio v0.0.11 h1:kqreJ5KOEXGMwHAWHDwIl+mjfNCPhAwZPa8gK7MKlyw=
github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI=
github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
github.com/kataras/sitemap v0.0.6 h1:w71CRMMKYMJh6LR2wTgnk5hSgjVNB9KL60n5e2KHvLY=
github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4=
github.com/kataras/tunnel v0.0.4 h1:sCAqWuJV7nPzGrlb0os3j49lk2JhILT0rID38NHNLpA=
github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -289,7 +272,11 @@ github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -302,18 +289,13 @@ github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4F
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -326,27 +308,18 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M=
github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/microcosm-cc/bluemonday v1.0.23 h1:SMZe2IGa0NuHvnVNAZ+6B38gsTbi5e4sViiWJyDDqFY=
github.com/microcosm-cc/bluemonday v1.0.23/go.mod h1:mN70sk7UkkF8TUr2IGBpNN0jAgStuPzlK76QuruE/z4=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -357,25 +330,17 @@ github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iP
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/nats-io/jwt v0.3.0 h1:xdnzwFETV++jNc4W1mw//qFyJGb2ABOombmZJQS4+Qo=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
github.com/nats-io/nats-server/v2 v2.9.11 h1:4y5SwWvWI59V5mcqtuoqKq6L9NDUydOP3Ekwuwl8cZI=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nats.go v1.23.0 h1:lR28r7IX44WjYgdiKz9GmUeW0uh/m33uD3yEjLZ2cOE=
github.com/nats-io/nats.go v1.23.0/go.mod h1:ki/Scsa23edbh8IRZbCuNXR9TDcbvfaSijKtaqQgw+Q=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -396,6 +361,9 @@ github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfad
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
@@ -422,6 +390,7 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -432,18 +401,11 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo=
github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk=
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
@@ -451,7 +413,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
@@ -466,65 +427,57 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM= github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tdewolff/minify/v2 v2.12.4 h1:kejsHQMM17n6/gwdw53qsi6lg0TGddZADVyQOz1KMdE=
github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk=
github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZycQ=
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/yosssi/ace v0.0.5 h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA=
github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -534,11 +487,12 @@ golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
@@ -550,8 +504,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -573,9 +527,10 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -585,8 +540,10 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -608,7 +565,6 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -618,28 +574,32 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -659,15 +619,13 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -692,20 +650,19 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
@@ -724,6 +681,11 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
@@ -736,6 +698,6 @@ modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8= modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI= modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

View File

@@ -0,0 +1,20 @@
package controller
import (
"sync"
"gorm.io/gorm"
)
var (
// HistoryCtrler is the singleton history controller instance
HistoryCtrler *HistoryController
initControllerOnce sync.Once
)
// InitController initializes the controller with the given database handle
func InitController(db *gorm.DB) {
initControllerOnce.Do(func() {
HistoryCtrler = NewHistoryController(db)
})
}
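A minimal wiring sketch (hypothetical entry-point code, not part of this PR) of how InitController could be fed a gorm connection; gorm.io/driver/postgres is the driver added to go.sum above, and the DSN is a placeholder. Because initControllerOnce is a sync.Once, calling InitController from several start-up paths is harmless.

package main

import (
    "log"

    "gorm.io/driver/postgres"
    "gorm.io/gorm"

    "bridge-history-api/internal/controller"
)

func main() {
    // Placeholder DSN; the real service reads its database settings from config.
    db, err := gorm.Open(postgres.Open("postgres://user:pass@localhost:5432/bridge_history"), &gorm.Config{})
    if err != nil {
        log.Fatal(err)
    }
    // Safe to call repeatedly: sync.Once ensures HistoryCtrler is built exactly once.
    controller.InitController(db)
}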

View File

@@ -0,0 +1,72 @@
package controller
import (
"github.com/ethereum/go-ethereum/common"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"bridge-history-api/internal/logic"
"bridge-history-api/internal/types"
)
// HistoryController contains the query claimable txs service
type HistoryController struct {
historyLogic *logic.HistoryLogic
}
// NewHistoryController returns a HistoryController instance
func NewHistoryController(db *gorm.DB) *HistoryController {
return &HistoryController{
historyLogic: logic.NewHistoryLogic(db),
}
}
// GetAllClaimableTxsByAddr handles the HTTP GET request that lists claimable txs for an address
func (c *HistoryController) GetAllClaimableTxsByAddr(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
return
}
offset := (req.Page - 1) * req.PageSize
limit := req.PageSize
txs, total, err := c.historyLogic.GetClaimableTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
if err != nil {
types.RenderJSON(ctx, types.ErrGetClaimablesFailure, err, nil)
return
}
types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: txs, Total: total})
}
// GetAllTxsByAddr handles the HTTP GET request that lists all txs for an address
func (c *HistoryController) GetAllTxsByAddr(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
return
}
offset := (req.Page - 1) * req.PageSize
limit := req.PageSize
message, total, err := c.historyLogic.GetTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
if err != nil {
types.RenderJSON(ctx, types.ErrGetTxsByAddrFailure, err, nil)
return
}
types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: message, Total: total})
}
// PostQueryTxsByHash handles the HTTP POST request that queries txs by a list of hashes
func (c *HistoryController) PostQueryTxsByHash(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
return
}
result, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderJSON(ctx, types.ErrGetTxsByHashFailure, err, nil)
return
}
types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: result, Total: 0})
}
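A small client-side sketch (assumed host and port, not part of the PR) of how GetAllTxsByAddr is typically queried: page and page_size are required by the binding tags, and the handler maps them to offset = (page - 1) * page_size.

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// envelope mirrors types.Response so the reply can be decoded without importing server packages.
type envelope struct {
    ErrCode int             `json:"errcode"`
    ErrMsg  string          `json:"errmsg"`
    Data    json.RawMessage `json:"data"`
}

func main() {
    // page=2 with page_size=10 maps to offset 10, limit 10 on the server side.
    url := "http://localhost:8080/api/txs?address=0x0000000000000000000000000000000000000001&page=2&page_size=10"
    resp, err := http.Get(url)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var e envelope
    if err := json.NewDecoder(resp.Body).Decode(&e); err != nil {
        panic(err)
    }
    fmt.Println("errcode:", e.ErrCode, "errmsg:", e.ErrMsg)
}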

View File

@@ -0,0 +1,204 @@
package logic
import (
"context"
"strconv"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
"bridge-history-api/internal/types"
"bridge-history-api/orm"
)
// HistoryLogic provides the tx history query service.
type HistoryLogic struct {
db *gorm.DB
}
// NewHistoryLogic returns a HistoryLogic backed by the given db
func NewHistoryLogic(db *gorm.DB) *HistoryLogic {
logic := &HistoryLogic{db: db}
return logic
}
// getCrossTxClaimInfo gets the UserClaimInfo for the given message hash
func getCrossTxClaimInfo(ctx context.Context, msgHash string, db *gorm.DB) *types.UserClaimInfo {
l2SentMsgOrm := orm.NewL2SentMsg(db)
rollupOrm := orm.NewRollupBatch(db)
l2sentMsg, err := l2SentMsgOrm.GetL2SentMsgByHash(ctx, msgHash)
if err != nil || l2sentMsg == nil {
log.Debug("getCrossTxClaimInfo failed", "error", err)
return &types.UserClaimInfo{}
}
batch, err := rollupOrm.GetRollupBatchByIndex(ctx, l2sentMsg.BatchIndex)
if err != nil {
log.Debug("getCrossTxClaimInfo failed", "error", err)
return &types.UserClaimInfo{}
}
return &types.UserClaimInfo{
From: l2sentMsg.Sender,
To: l2sentMsg.Target,
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
}
}
func updateCrossTxHash(ctx context.Context, msgHash string, txInfo *types.TxHistoryInfo, db *gorm.DB) {
relayedOrm := orm.NewRelayedMsg(db)
relayed, err := relayedOrm.GetRelayedMsgByHash(ctx, msgHash)
if err != nil {
log.Debug("updateCrossTxHash failed", "error", err)
return
}
if relayed == nil {
return
}
if relayed.Layer1Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer1Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
if relayed.Layer2Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer2Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
}
// GetClaimableTxsByAddress gets all claimable txs for the given address
func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address common.Address, offset int, limit int) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
l2SentMsgOrm := orm.NewL2SentMsg(h.db)
l2CrossMsgOrm := orm.NewCrossMsg(h.db)
total, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressTotalNum(ctx, address.Hex())
if err != nil || total == 0 {
return txHistories, 0, err
}
results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(ctx, address.Hex(), offset, limit)
if err != nil || len(results) == 0 {
return txHistories, 0, err
}
var msgHashList []string
for _, result := range results {
msgHashList = append(msgHashList, result.MsgHash)
}
crossMsgs, err := l2CrossMsgOrm.GetL2CrossMsgByMsgHashList(ctx, msgHashList)
// crossMsgs can be empty, because the messages may have been emitted by the user calling the contract directly
if err != nil {
return txHistories, 0, err
}
crossMsgMap := make(map[string]*orm.CrossMsg)
for _, crossMsg := range crossMsgs {
crossMsgMap[crossMsg.MsgHash] = crossMsg
}
for _, result := range results {
txInfo := &types.TxHistoryInfo{
Hash: result.TxHash,
IsL1: false,
BlockNumber: result.Height,
FinalizeTx: &types.Finalized{},
ClaimInfo: getCrossTxClaimInfo(ctx, result.MsgHash, h.db),
}
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
txInfo.Amount = crossMsg.Amount
txInfo.To = crossMsg.Target
txInfo.BlockTimestamp = crossMsg.Timestamp
txInfo.CreatedAt = crossMsg.CreatedAt
}
txHistories = append(txHistories, txInfo)
}
return txHistories, total, err
}
// GetTxsByAddress gets all txs for the given address
func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address common.Address, offset int, limit int) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
utilOrm := orm.NewCrossMsg(h.db)
total, err := utilOrm.GetTotalCrossMsgCountByAddress(ctx, address.String())
if err != nil || total == 0 {
return txHistories, 0, err
}
result, err := utilOrm.GetCrossMsgsByAddressWithOffset(ctx, address.String(), offset, limit)
if err != nil {
return nil, 0, err
}
for _, msg := range result {
txHistory := &types.TxHistoryInfo{
Hash: msg.Layer1Hash + msg.Layer2Hash,
Amount: msg.Amount,
To: msg.Target,
IsL1: msg.MsgType == int(orm.Layer1Msg),
BlockNumber: msg.Height,
BlockTimestamp: msg.Timestamp,
CreatedAt: msg.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
ClaimInfo: getCrossTxClaimInfo(ctx, msg.MsgHash, h.db),
}
updateCrossTxHash(ctx, msg.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
}
return txHistories, total, nil
}
// GetTxsByHashes gets tx infos for the given tx hashes
func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, hashes []string) ([]*types.TxHistoryInfo, error) {
txHistories := make([]*types.TxHistoryInfo, 0)
crossMsgOrm := orm.NewCrossMsg(h.db)
for _, hash := range hashes {
l1result, err := crossMsgOrm.GetL1CrossMsgByHash(ctx, common.HexToHash(hash))
if err != nil {
return nil, err
}
if l1result != nil {
txHistory := &types.TxHistoryInfo{
Hash: l1result.Layer1Hash,
Amount: l1result.Amount,
To: l1result.Target,
IsL1: true,
BlockNumber: l1result.Height,
BlockTimestamp: l1result.Timestamp,
CreatedAt: l1result.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
}
updateCrossTxHash(ctx, l1result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
l2result, err := crossMsgOrm.GetL2CrossMsgByHash(ctx, common.HexToHash(hash))
if err != nil {
return nil, err
}
if l2result != nil {
txHistory := &types.TxHistoryInfo{
Hash: l2result.Layer2Hash,
Amount: l2result.Amount,
To: l2result.Target,
IsL1: false,
BlockNumber: l2result.Height,
BlockTimestamp: l2result.Timestamp,
CreatedAt: l2result.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
ClaimInfo: getCrossTxClaimInfo(ctx, l2result.MsgHash, h.db),
}
updateCrossTxHash(ctx, l2result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
}
return txHistories, nil
}
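A self-contained toy sketch (illustration only, not repository code) of the map-based join used in GetClaimableTxsByAddress: cross messages are indexed by MsgHash once, so each claimable sent message is enriched in O(1), and a missing entry simply leaves the optional fields unset, matching the case where the user called the contract directly.

package main

import "fmt"

type crossMsg struct {
    MsgHash string
    Amount  string
}

type sentMsg struct {
    MsgHash string
    TxHash  string
}

func main() {
    crossMsgs := []crossMsg{{MsgHash: "0x01", Amount: "100"}}
    results := []sentMsg{{MsgHash: "0x01", TxHash: "0xaa"}, {MsgHash: "0x02", TxHash: "0xbb"}}

    // Index cross messages by message hash once.
    idx := make(map[string]crossMsg, len(crossMsgs))
    for _, m := range crossMsgs {
        idx[m.MsgHash] = m
    }

    // Enrich each sent message if a matching cross message exists; "0x02" has none,
    // mirroring a message emitted by a direct contract call.
    for _, r := range results {
        if m, ok := idx[r.MsgHash]; ok {
            fmt.Println(r.TxHash, "amount:", m.Amount)
        } else {
            fmt.Println(r.TxHash, "no cross message")
        }
    }
}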

View File

@@ -0,0 +1,26 @@
package route
import (
"time"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"bridge-history-api/config"
"bridge-history-api/internal/controller"
)
// Route registers the bridge-history API routes on the given router
func Route(router *gin.Engine, conf *config.Config) {
router.Use(cors.New(cors.Config{
AllowOrigins: []string{"*"},
AllowMethods: []string{"GET", "POST", "PUT", "DELETE"},
AllowCredentials: true,
MaxAge: 12 * time.Hour,
}))
r := router.Group("api/")
r.GET("/txs", controller.HistoryCtrler.GetAllTxsByAddr)
r.POST("/txsbyhashes", controller.HistoryCtrler.PostQueryTxsByHash)
r.GET("/claimable", controller.HistoryCtrler.GetAllClaimableTxsByAddr)
}
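A minimal bootstrap sketch (hypothetical main package; the empty Config value and the listen address are placeholders) showing how Route is mounted and which endpoints the "api/" group exposes.

package main

import (
    "github.com/gin-gonic/gin"

    "bridge-history-api/config"
    "bridge-history-api/internal/route"
)

func main() {
    router := gin.Default()
    // The conf argument is not read in the Route body shown above; an empty Config is used here only for illustration.
    route.Route(router, &config.Config{})

    // Endpoints registered under the "api/" group:
    //   GET  /api/txs?address=<addr>&page=<n>&page_size=<m>
    //   POST /api/txsbyhashes   body: {"txs": ["<hash>"]}
    //   GET  /api/claimable?address=<addr>&page=<n>&page_size=<m>
    if err := router.Run(":8080"); err != nil {
        panic(err)
    }
}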

View File

@@ -0,0 +1,95 @@
package types
import (
"net/http"
"time"
"github.com/gin-gonic/gin"
)
const (
// Success indicates the request succeeded.
Success = 0
// ErrParameterInvalidNo indicates invalid request parameters
ErrParameterInvalidNo = 40001
// ErrGetClaimablesFailure indicates a failure while querying claimable txs
ErrGetClaimablesFailure = 40002
// ErrGetTxsByHashFailure indicates a failure while querying txs by hash list
ErrGetTxsByHashFailure = 40003
// ErrGetTxsByAddrFailure indicates a failure while querying txs by address
ErrGetTxsByAddrFailure = 40004
)
// QueryByAddressRequest is the request parameter of the query-by-address API
type QueryByAddressRequest struct {
Address string `form:"address" binding:"required"`
Page int `form:"page" binding:"required"`
PageSize int `form:"page_size" binding:"required"`
}
// QueryByHashRequest is the request parameter of the query-by-hash API
type QueryByHashRequest struct {
Txs []string `json:"txs" binding:"required"`
}
// ResultData contains the returned txs and the total count
type ResultData struct {
Result []*TxHistoryInfo `json:"result"`
Total uint64 `json:"total"`
}
// Response is the response schema
type Response struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data interface{} `json:"data"`
}
// Finalized is the schema of tx finalized info
type Finalized struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
}
// UserClaimInfo is the schema of tx claim info
type UserClaimInfo struct {
From string `json:"from"`
To string `json:"to"`
Value string `json:"value"`
Nonce string `json:"nonce"`
BatchHash string `json:"batch_hash"`
Message string `json:"message"`
Proof string `json:"proof"`
BatchIndex string `json:"batch_index"`
}
// TxHistoryInfo the schema of tx history infos
type TxHistoryInfo struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
FinalizeTx *Finalized `json:"finalizeTx"`
ClaimInfo *UserClaimInfo `json:"claimInfo"`
CreatedAt *time.Time `json:"createdTime"`
}
// RenderJSON renders response with json
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: errCode,
ErrMsg: errMsg,
Data: data,
}
ctx.JSON(http.StatusOK, renderData)
}
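
A hypothetical handler sketch showing how RenderJSON and the error codes above are meant to be used together; the controller struct, the types import path, and the omitted service lookup are placeholders, not the actual implementation.
package controller

import (
	"github.com/gin-gonic/gin"

	"bridge-history-api/internal/types" // assumed import path for the types package
)

// HistoryController is a placeholder controller for this sketch.
type HistoryController struct{}

// GetAllClaimableTxsByAddr binds the query params, then renders either an error code or the result payload.
func (c *HistoryController) GetAllClaimableTxsByAddr(ctx *gin.Context) {
	var req types.QueryByAddressRequest
	if err := ctx.ShouldBind(&req); err != nil {
		types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
		return
	}
	// placeholder for the real service / orm lookup
	var txs []*types.TxHistoryInfo
	types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: txs, Total: uint64(len(txs))})
}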

View File

@@ -1,11 +0,0 @@
package model
type QueryByAddressRequest struct {
Address string `url:"address"`
Offset int `url:"offset"`
Limit int `url:"limit"`
}
type QueryByHashRequest struct {
Txs []string `url:"txs"`
}

View File

@@ -1,18 +0,0 @@
package model
import "bridge-history-api/service"
type Data struct {
Result []*service.TxHistoryInfo `json:"result"`
Total uint64 `json:"total"`
}
type QueryByAddressResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
}
type QueryByHashResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
}

View File

@@ -0,0 +1,103 @@
package orm
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
)
// RollupBatch is the struct for rollup_batch table
type RollupBatch struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
BatchHash string `json:"batch_hash" gorm:"column:batch_hash"`
CommitHeight uint64 `json:"commit_height" gorm:"column:commit_height"`
StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewRollupBatch create an RollupBatch instance
func NewRollupBatch(db *gorm.DB) *RollupBatch {
return &RollupBatch{db: db}
}
// TableName returns the table name for the Batch model.
func (*RollupBatch) TableName() string {
return "rollup_batch"
}
// GetLatestRollupBatchProcessedHeight return latest processed height from rollup_batch table
func (r *RollupBatch) GetLatestRollupBatchProcessedHeight(ctx context.Context) (uint64, error) {
var result RollupBatch
err := r.db.WithContext(ctx).Unscoped().Select("commit_height").Order("id desc").First(&result).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("RollupBatch.GetLatestRollupBatchProcessedHeight error: %w", err)
}
return result.CommitHeight, nil
}
// GetLatestRollupBatch return the latest rollup batch in db
func (r *RollupBatch) GetLatestRollupBatch(ctx context.Context) (*RollupBatch, error) {
var result RollupBatch
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_hash is not NULL").Order("batch_index desc").First(&result).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("RollupBatch.GetLatestRollupBatch error: %w", err)
}
return &result, nil
}
// GetRollupBatchByIndex return the rollup batch by index
func (r *RollupBatch) GetRollupBatchByIndex(ctx context.Context, index uint64) (*RollupBatch, error) {
var result RollupBatch
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index = ?", index).First(&result).Error
if err != nil {
return nil, fmt.Errorf("RollupBatch.GetRollupBatchByIndex error: %w", err)
}
return &result, nil
}
// InsertRollupBatch batch inserts rollup batches into the db
func (r *RollupBatch) InsertRollupBatch(ctx context.Context, batches []*RollupBatch, dbTx ...*gorm.DB) error {
if len(batches) == 0 {
return nil
}
db := r.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&RollupBatch{}).Create(&batches).Error
if err != nil {
batchIndexes := make([]uint64, 0, len(batches))
heights := make([]uint64, 0, len(batches))
for _, batch := range batches {
batchIndexes = append(batchIndexes, batch.BatchIndex)
heights = append(heights, batch.CommitHeight)
}
log.Error("failed to insert rollup batch", "batchIndexes", batchIndexes, "heights", heights)
return fmt.Errorf("RollupBatch.InsertRollupBatch error: %w", err)
}
return nil
}
// UpdateRollupBatchWithdrawRoot updates the withdraw_root column in rollup_batch table
func (r *RollupBatch) UpdateRollupBatchWithdrawRoot(ctx context.Context, batchIndex uint64, withdrawRoot string) error {
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index = ?", batchIndex).Update("withdraw_root", withdrawRoot).Error
if err != nil {
return fmt.Errorf("RollupBatch.UpdateRuollupBatch error: %w", err)
}
return nil
}
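
An illustrative sketch of how the optional dbTx parameter is intended to be used: the caller opens a gorm transaction and passes it through so the insert joins that transaction instead of the root handle. The wrapper function and the orm import path are assumptions.
package main

import (
	"context"

	"gorm.io/gorm"

	"bridge-history-api/orm" // assumed import path
)

func saveBatches(ctx context.Context, db *gorm.DB, batches []*orm.RollupBatch) error {
	rollupBatchOrm := orm.NewRollupBatch(db)
	return db.Transaction(func(tx *gorm.DB) error {
		// passing tx makes the insert run inside this transaction
		return rollupBatchOrm.InsertRollupBatch(ctx, batches, tx)
	})
}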

View File

@@ -0,0 +1,370 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
)
// AssetType can be ETH/ERC20/ERC1155/ERC721
type AssetType int
// MsgType can be layer1/layer2 msg
type MsgType int
func (a AssetType) String() string {
switch a {
case ETH:
return "ETH"
case ERC20:
return "ERC20"
case ERC1155:
return "ERC1155"
case ERC721:
return "ERC721"
}
return "Unknown Asset Type"
}
const (
// ETH = 0
ETH AssetType = iota
// ERC20 = 1
ERC20
// ERC721 = 2
ERC721
// ERC1155 = 3
ERC1155
)
const (
// UnknownMsg = 0
UnknownMsg MsgType = iota
// Layer1Msg = 1
Layer1Msg
// Layer2Msg = 2
Layer2Msg
)
// CrossMsg represents a cross-chain message between layer 1 and layer 2
type CrossMsg struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id"`
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
Height uint64 `json:"height" gorm:"column:height"`
Sender string `json:"sender" gorm:"column:sender"`
Target string `json:"target" gorm:"column:target"`
Amount string `json:"amount" gorm:"column:amount"`
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:''"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:''"`
Layer1Token string `json:"layer1_token" gorm:"column:layer1_token;default:''"`
Layer2Token string `json:"layer2_token" gorm:"column:layer2_token;default:''"`
TokenIDs string `json:"token_ids" gorm:"column:token_ids;default:''"`
TokenAmounts string `json:"token_amounts" gorm:"column:token_amounts;default:''"`
Asset int `json:"asset" gorm:"column:asset"`
MsgType int `json:"msg_type" gorm:"column:msg_type"`
Timestamp *time.Time `json:"timestamp" gorm:"column:block_timestamp;default:NULL"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// TableName returns the table name for the CrossMsg model.
func (*CrossMsg) TableName() string {
return "cross_message"
}
// NewCrossMsg returns a new instance of CrossMsg.
func NewCrossMsg(db *gorm.DB) *CrossMsg {
return &CrossMsg{db: db}
}
// L1 Cross Msgs Operations
// GetL1CrossMsgByHash returns layer1 cross message by given hash
func (c *CrossMsg) GetL1CrossMsgByHash(ctx context.Context, l1Hash common.Hash) (*CrossMsg, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer1_hash = ? AND msg_type = ?", l1Hash.String(), Layer1Msg).First(&result).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("CrossMsg.GetL1CrossMsgByHash error: %w", err)
}
return &result, nil
}
// GetLatestL1ProcessedHeight returns the latest processed height of layer1 cross messages
func (c *CrossMsg) GetLatestL1ProcessedHeight(ctx context.Context) (uint64, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("msg_type = ?", Layer1Msg).
Select("height").
Order("id DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetLatestL1ProcessedHeight error: %w", err)
}
return result.Height, nil
}
// GetL1EarliestNoBlockTimestampHeight returns the earliest layer1 cross message height which has no block timestamp
func (c *CrossMsg) GetL1EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("block_timestamp IS NULL AND msg_type = ?", Layer1Msg).
Select("height").
Order("height ASC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetL1EarliestNoBlockTimestampHeight error: %w", err)
}
return result.Height, nil
}
// InsertL1CrossMsg batch insert layer1 cross messages into db
func (c *CrossMsg) InsertL1CrossMsg(ctx context.Context, messages []*CrossMsg, dbTx ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&CrossMsg{}).Create(&messages).Error
if err != nil {
l1hashes := make([]string, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
l1hashes = append(l1hashes, msg.Layer1Hash)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l1 cross messages", "l1hashes", l1hashes, "heights", heights, "err", err)
return fmt.Errorf("CrossMsg.InsertL1CrossMsg error: %w", err)
}
return nil
}
// UpdateL1CrossMsgHash updates the l1 cross msg hash in db; no need to check msg_type since layer1_hash won't be empty if it's a layer1 msg
func (c *CrossMsg) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash, dbTx ...*gorm.DB) error {
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&CrossMsg{}).Where("layer1_hash = ?", l1Hash.Hex()).Update("msg_hash", msgHash.Hex()).Error
if err != nil {
return fmt.Errorf("CrossMsg.UpdateL1CrossMsgHash error: %w", err)
}
return nil
}
// UpdateL1BlockTimestamp update layer1 block timestamp
func (c *CrossMsg) UpdateL1BlockTimestamp(ctx context.Context, height uint64, timestamp time.Time) error {
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("height = ? AND msg_type = ?", height, Layer1Msg).
Update("block_timestamp", timestamp).Error
if err != nil {
return fmt.Errorf("CrossMsg.UpdateL1BlockTimestamp error: %w", err)
}
return err
}
// DeleteL1CrossMsgAfterHeight soft delete layer1 cross messages after given height
func (c *CrossMsg) DeleteL1CrossMsgAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Delete(&CrossMsg{}, "height > ? AND msg_type = ?", height, Layer1Msg).Error
if err != nil {
return fmt.Errorf("CrossMsg.DeleteL1CrossMsgAfterHeight error: %w", err)
}
return nil
}
// L2 Cross Msgs Operations
// GetL2CrossMsgByHash returns layer2 cross message by given hash
func (c *CrossMsg) GetL2CrossMsgByHash(ctx context.Context, l2Hash common.Hash) (*CrossMsg, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer2_hash = ? AND msg_type = ?", l2Hash.String(), Layer2Msg).First(&result).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("CrossMsg.GetL2CrossMsgByHash error: %w", err)
}
return &result, nil
}
// GetLatestL2ProcessedHeight returns the latest processed height of layer2 cross messages
func (c *CrossMsg) GetLatestL2ProcessedHeight(ctx context.Context) (uint64, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Select("height").
Where("msg_type = ?", Layer2Msg).
Order("id DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetLatestL2ProcessedHeight error: %w", err)
}
return result.Height, nil
}
// GetL2CrossMsgByMsgHashList returns layer2 cross messages under given msg hashes
func (c *CrossMsg) GetL2CrossMsgByMsgHashList(ctx context.Context, msgHashList []string) ([]*CrossMsg, error) {
var results []*CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("msg_hash IN (?) AND msg_type = ?", msgHashList, Layer2Msg).
Find(&results).
Error
if err != nil {
return nil, fmt.Errorf("CrossMsg.GetL2CrossMsgByMsgHashList error: %w", err)
}
if len(results) == 0 {
log.Debug("no CrossMsg under given msg hashes", "msg hash list", msgHashList)
}
return results, nil
}
// GetL2EarliestNoBlockTimestampHeight returns the earliest layer2 cross message height which has no block timestamp
func (c *CrossMsg) GetL2EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("block_timestamp IS NULL AND msg_type = ?", Layer2Msg).
Select("height").
Order("height ASC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetL2EarliestNoBlockTimestampHeight error: %w", err)
}
return result.Height, nil
}
// InsertL2CrossMsg batch insert layer2 cross messages
func (c *CrossMsg) InsertL2CrossMsg(ctx context.Context, messages []*CrossMsg, dbTx ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&CrossMsg{}).Create(&messages).Error
if err != nil {
l2hashes := make([]string, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
l2hashes = append(l2hashes, msg.Layer2Hash)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l2 cross messages", "l2hashes", l2hashes, "heights", heights, "err", err)
return fmt.Errorf("CrossMsg.InsertL2CrossMsg error: %w", err)
}
return nil
}
// UpdateL2CrossMsgHash update layer2 cross message hash
func (c *CrossMsg) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash, dbTx ...*gorm.DB) error {
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&CrossMsg{}).
Where("layer2_hash = ?", l2Hash.String()).
Update("msg_hash", msgHash.String()).
Error
if err != nil {
return fmt.Errorf("CrossMsg.UpdateL2CrossMsgHash error: %w", err)
}
return nil
}
// UpdateL2BlockTimestamp update layer2 cross message block timestamp
func (c *CrossMsg) UpdateL2BlockTimestamp(ctx context.Context, height uint64, timestamp time.Time) error {
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("height = ? AND msg_type = ?", height, Layer2Msg).
Update("block_timestamp", timestamp).Error
if err != nil {
return fmt.Errorf("CrossMsg.UpdateL2BlockTimestamp error: %w", err)
}
return nil
}
// DeleteL2CrossMsgFromHeight delete layer2 cross messages from given height
func (c *CrossMsg) DeleteL2CrossMsgFromHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Delete(&CrossMsg{}, "height > ? AND msg_type = ?", height, Layer2Msg).Error
if err != nil {
return fmt.Errorf("CrossMsg.DeleteL2CrossMsgFromHeight error: %w", err)
}
return nil
}
// General Operations
// GetTotalCrossMsgCountByAddress get total cross msg count by address
func (c *CrossMsg) GetTotalCrossMsgCountByAddress(ctx context.Context, sender string) (uint64, error) {
var count int64
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("sender = ?", sender).
Count(&count).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetTotalCrossMsgCountByAddress error: %w", err)
}
return uint64(count), nil
}
// GetCrossMsgsByAddressWithOffset get cross msgs by address with offset
func (c *CrossMsg) GetCrossMsgsByAddressWithOffset(ctx context.Context, sender string, offset int, limit int) ([]CrossMsg, error) {
var messages []CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("sender = ?", sender).
Order("block_timestamp DESC NULLS FIRST, id DESC").
Limit(limit).
Offset(offset).
Find(&messages).
Error
if err != nil {
return nil, fmt.Errorf("CrossMsg.GetCrossMsgsByAddressWithOffset error: %w", err)
}
return messages, nil
}

View File

@@ -0,0 +1,215 @@
package orm
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
)
// L2SentMsg defines the struct for l2_sent_msg table record
type L2SentMsg struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id"`
OriginalSender string `json:"original_sender" gorm:"column:original_sender;default:''"`
TxHash string `json:"tx_hash" gorm:"column:tx_hash"`
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
Sender string `json:"sender" gorm:"column:sender"`
Target string `json:"target" gorm:"column:target"`
Value string `json:"value" gorm:"column:value"`
Height uint64 `json:"height" gorm:"column:height"`
Nonce uint64 `json:"nonce" gorm:"column:nonce"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index;default:0"`
MsgProof string `json:"msg_proof" gorm:"column:msg_proof;default:''"`
MsgData string `json:"msg_data" gorm:"column:msg_data;default:''"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL2SentMsg creates an L2SentMsg instance
func NewL2SentMsg(db *gorm.DB) *L2SentMsg {
return &L2SentMsg{db: db}
}
// TableName returns the table name for the L2SentMsg model.
func (*L2SentMsg) TableName() string {
return "l2_sent_msg"
}
// GetL2SentMsgByHash get l2 sent msg by hash
func (l *L2SentMsg) GetL2SentMsgByHash(ctx context.Context, msgHash string) (*L2SentMsg, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("msg_hash = ?", msgHash).
First(&result).
Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgByHash error: %w", err)
}
return &result, nil
}
// GetLatestSentMsgHeightOnL2 get latest sent msg height on l2
func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Select("height").
Order("nonce DESC").
First(&result).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("L2SentMsg.GetLatestSentMsgHeightOnL2 error: %w", err)
}
return result.Height, nil
}
// GetClaimableL2SentMsgByAddressWithOffset get claimable l2 sent msg by address with offset
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressWithOffset(ctx context.Context, address string, offset int, limit int) ([]*L2SentMsg, error) {
var results []*L2SentMsg
err := l.db.WithContext(ctx).Raw(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='' ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset).
Scan(&results).Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressWithOffset error: %w", err)
}
return results, nil
}
// GetClaimableL2SentMsgByAddressTotalNum get claimable l2 sent msg by address total num
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressTotalNum(ctx context.Context, address string) (uint64, error) {
var count uint64
err := l.db.WithContext(ctx).Raw(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='';`, address).
Scan(&count).Error
if err != nil {
return 0, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressTotalNum error: %w", err)
}
return count, nil
}
// GetLatestL2SentMsgBatchIndex get latest l2 sent msg batch index
func (l *L2SentMsg) GetLatestL2SentMsgBatchIndex(ctx context.Context) (int64, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("batch_index != 0").
Order("batch_index DESC").
Select("batch_index").
First(&result).
Error
if err != nil {
return -1, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgBatchIndex error: %w", err)
}
// Watch for overflow, though it is unlikely to happen
return int64(result.BatchIndex), nil
}
// GetL2SentMsgMsgHashByHeightRange get l2 sent msg msg hash by height range
func (l *L2SentMsg) GetL2SentMsgMsgHashByHeightRange(ctx context.Context, startHeight, endHeight uint64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("height >= ? AND height <= ?", startHeight, endHeight).
Order("nonce ASC").
Find(&results).
Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgMsgHashByHeightRange error: %w", err)
}
return results, nil
}
// GetL2SentMessageByNonce get l2 sent message by nonce
func (l *L2SentMsg) GetL2SentMessageByNonce(ctx context.Context, nonce uint64) (*L2SentMsg, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("nonce = ?", nonce).
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("L2SentMsg.GetL2SentMessageByNonce error: %w", err)
}
return &result, nil
}
// GetLatestL2SentMsgLEHeight get latest l2 sent msg less than or equal to end block number
func (l *L2SentMsg) GetLatestL2SentMsgLEHeight(ctx context.Context, endBlockNumber uint64) (*L2SentMsg, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("height <= ?", endBlockNumber).
Order("nonce DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgLEHeight error: %w", err)
}
return &result, nil
}
// InsertL2SentMsg batch insert l2 sent msg
func (l *L2SentMsg) InsertL2SentMsg(ctx context.Context, messages []*L2SentMsg, dbTx ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := l.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&L2SentMsg{}).Create(&messages).Error
if err != nil {
l2hashes := make([]string, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
l2hashes = append(l2hashes, msg.TxHash)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l2 sent messages", "l2hashes", l2hashes, "heights", heights, "err", err)
return fmt.Errorf("L2SentMsg.InsertL2SentMsg error: %w", err)
}
return nil
}
// UpdateL2MessageProof update l2 message proof in db tx
func (l *L2SentMsg) UpdateL2MessageProof(ctx context.Context, msgHash string, proof string, batchIndex uint64, dbTx ...*gorm.DB) error {
db := l.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&L2SentMsg{}).
Where("msg_hash = ?", msgHash).
Updates(map[string]interface{}{
"msg_proof": proof,
"batch_index": batchIndex,
}).Error
if err != nil {
return fmt.Errorf("L2SentMsg.UpdateL2MessageProof error: %w", err)
}
return nil
}
// DeleteL2SentMsgAfterHeight delete l2 sent msg after height
func (l *L2SentMsg) DeleteL2SentMsgAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := l.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Delete(&L2SentMsg{}, "height > ?", height).Error
if err != nil {
return fmt.Errorf("L2SentMsg.DeleteL2SentMsgAfterHeight error: %w", err)
}
return nil
}
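
For reference, a sketch (assumed caller, not part of the diff) of how the claimable-message queries pair up for a paginated response: fetch the total first, then one page via the offset variant. The function name and import path are illustrative.
package main

import (
	"context"

	"gorm.io/gorm"

	"bridge-history-api/orm" // assumed import path
)

func listClaimable(ctx context.Context, db *gorm.DB, addr string, page, pageSize int) ([]*orm.L2SentMsg, uint64, error) {
	l2SentMsgOrm := orm.NewL2SentMsg(db)
	total, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressTotalNum(ctx, addr)
	if err != nil || total == 0 {
		return nil, total, err
	}
	offset := (page - 1) * pageSize
	msgs, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(ctx, addr, offset, pageSize)
	if err != nil {
		return nil, 0, err
	}
	return msgs, total, nil
}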

View File

@@ -3,7 +3,7 @@
 create table cross_message
 (
     id BIGSERIAL PRIMARY KEY,
-    msg_hash VARCHAR NOT NULL DEFAULT '',
+    msg_hash VARCHAR NOT NULL,
     height BIGINT NOT NULL,
     sender VARCHAR NOT NULL,
     target VARCHAR NOT NULL,
@@ -14,10 +14,8 @@ create table cross_message
     layer2_token VARCHAR NOT NULL DEFAULT '',
     asset SMALLINT NOT NULL,
     msg_type SMALLINT NOT NULL,
-    -- use array to support nft bridge
-    token_ids VARCHAR[] NOT NULL DEFAULT '{}',
-    -- use array to support nft bridge
-    token_amounts VARCHAR[] NOT NULL DEFAULT '{}',
+    token_ids TEXT NOT NULL DEFAULT '',
+    token_amounts TEXT NOT NULL DEFAULT '',
     block_timestamp TIMESTAMP(0) DEFAULT NULL,
     created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,

View File

@@ -19,6 +19,8 @@ CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);
 CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);
+CREATE INDEX idx_msg_hash_deleted_at_relayed_msg on relayed_msg (msg_hash, deleted_at);
 CREATE OR REPLACE FUNCTION update_timestamp()
 RETURNS TRIGGER AS $$
 BEGIN

View File

@@ -3,7 +3,8 @@
 create table l2_sent_msg
 (
     id BIGSERIAL PRIMARY KEY,
-    tx_sender VARCHAR NOT NULL,
+    original_sender VARCHAR NOT NULL DEFAULT '',
+    tx_hash VARCHAR NOT NULL,
     sender VARCHAR NOT NULL,
     target VARCHAR NOT NULL,
     value VARCHAR NOT NULL,
@@ -24,6 +25,8 @@ on l2_sent_msg (msg_hash) where deleted_at IS NULL;
 create unique index uk_nonce
 on l2_sent_msg (nonce) where deleted_at IS NULL;
+CREATE INDEX idx_msg_hash_deleted_at_l2_sent_msg on l2_sent_msg (msg_hash, deleted_at);
 CREATE OR REPLACE FUNCTION update_timestamp()
 RETURNS TRIGGER AS $$
 BEGIN

View File

@@ -8,6 +8,7 @@ create table rollup_batch
     start_block_number BIGINT NOT NULL,
     end_block_number BIGINT NOT NULL,
     batch_hash VARCHAR NOT NULL,
+    withdraw_root TEXT DEFAULT NULL,
     created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
     deleted_at TIMESTAMP(0) DEFAULT NULL

View File

@@ -0,0 +1,142 @@
package orm
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
)
// RelayedMsg is the struct for relayed_msg table
type RelayedMsg struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id"`
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
Height uint64 `json:"height" gorm:"column:height"`
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:''"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:''"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewRelayedMsg creates a RelayedMsg instance
func NewRelayedMsg(db *gorm.DB) *RelayedMsg {
return &RelayedMsg{db: db}
}
// TableName returns the table name for the RelayedMsg model.
func (*RelayedMsg) TableName() string {
return "relayed_msg"
}
// GetRelayedMsgByHash get relayed msg by hash
func (r *RelayedMsg) GetRelayedMsgByHash(ctx context.Context, msgHash string) (*RelayedMsg, error) {
var result RelayedMsg
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
Where("msg_hash = ?", msgHash).
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("RelayedMsg.GetRelayedMsgByHash error: %w", err)
}
return &result, nil
}
// GetLatestRelayedHeightOnL1 get latest relayed height on l1
func (r *RelayedMsg) GetLatestRelayedHeightOnL1(ctx context.Context) (uint64, error) {
var result RelayedMsg
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
Select("height").
Where("layer1_hash != ''").
Order("height DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("RelayedMsg.GetLatestRelayedHeightOnL1 error: %w", err)
}
return result.Height, err
}
// GetLatestRelayedHeightOnL2 get latest relayed height on l2
func (r *RelayedMsg) GetLatestRelayedHeightOnL2(ctx context.Context) (uint64, error) {
var result RelayedMsg
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
Select("height").
Where("layer2_hash != ''").
Order("height DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("RelayedMsg.GetLatestRelayedHeightOnL2 error: %w", err)
}
return result.Height, nil
}
// InsertRelayedMsg batch inserts relayed msgs into the db
func (r *RelayedMsg) InsertRelayedMsg(ctx context.Context, messages []*RelayedMsg, dbTx ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := r.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&RelayedMsg{}).Create(&messages).Error
if err != nil {
l2hashes := make([]string, 0, len(messages))
l1hashes := make([]string, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
l2hashes = append(l2hashes, msg.Layer2Hash)
l1hashes = append(l1hashes, msg.Layer1Hash)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l2 sent messages", "l2hashes", l2hashes, "l1hashes", l1hashes, "heights", heights, "err", err)
return fmt.Errorf("RelayedMsg.InsertRelayedMsg error: %w", err)
}
return nil
}
// DeleteL1RelayedHashAfterHeight delete l1 relayed hash after height
func (r *RelayedMsg) DeleteL1RelayedHashAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := r.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).
Delete(&RelayedMsg{}, "height > ? AND layer1_hash != ''", height).Error
if err != nil {
return fmt.Errorf("RelayedMsg.DeleteL1RelayedHashAfterHeight error: %w", err)
}
return nil
}
// DeleteL2RelayedHashAfterHeight deletes l2 relayed msgs after the given height
func (r *RelayedMsg) DeleteL2RelayedHashAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := r.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).
Delete(&RelayedMsg{}, "height > ? AND layer2_hash != ''", height).Error
if err != nil {
return fmt.Errorf("RelayedMsg.DeleteL2RelayedHashAfterHeight error: %w", err)
}
return nil
}

View File

@@ -1,188 +0,0 @@
package service
import (
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"bridge-history-api/db"
"bridge-history-api/db/orm"
)
type Finalized struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
}
type UserClaimInfo struct {
From string `json:"from"`
To string `json:"to"`
Value string `json:"value"`
Nonce string `json:"nonce"`
BatchHash string `json:"batch_hash"`
Message string `json:"message"`
Proof string `json:"proof"`
BatchIndex string `json:"batch_index"`
}
type TxHistoryInfo struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
FinalizeTx *Finalized `json:"finalizeTx"`
ClaimInfo *UserClaimInfo `json:"claimInfo"`
CreatedAt *time.Time `json:"createdTime"`
}
// HistoryService example service.
type HistoryService interface {
GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
}
// NewHistoryService returns a service backed with a "db"
func NewHistoryService(db db.OrmFactory) HistoryService {
service := &historyBackend{db: db, prefix: "Scroll-Bridge-History-Server"}
return service
}
type historyBackend struct {
prefix string
db db.OrmFactory
}
func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
l2sentMsg, err := db.GetL2SentMsgByHash(msgHash)
if err != nil {
log.Debug("GetCrossTxClaimInfo failed", "error", err)
return &UserClaimInfo{}
}
batch, err := db.GetRollupBatchByIndex(l2sentMsg.BatchIndex)
if err != nil {
log.Debug("GetCrossTxClaimInfo failed", "error", err)
return &UserClaimInfo{}
}
return &UserClaimInfo{
From: l2sentMsg.Sender,
To: l2sentMsg.Target,
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
}
}
func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory) {
relayed, err := db.GetRelayedMsgByHash(msgHash)
if err != nil {
log.Error("updateCrossTxHash failed", "error", err)
return
}
if relayed == nil {
return
}
if relayed.Layer1Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer1Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
if relayed.Layer2Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer2Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
}
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
total, err := h.db.GetTotalCrossMsgCountByAddress(address.String())
if err != nil || total == 0 {
return txHistories, 0, err
}
result, err := h.db.GetCrossMsgsByAddressWithOffset(address.String(), offset, limit)
if err != nil {
return nil, 0, err
}
for _, msg := range result {
txHistory := &TxHistoryInfo{
Hash: msg.Layer1Hash + msg.Layer2Hash,
Amount: msg.Amount,
To: msg.Target,
IsL1: msg.MsgType == int(orm.Layer1Msg),
BlockNumber: msg.Height,
BlockTimestamp: msg.Timestamp,
CreatedAt: msg.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
ClaimInfo: GetCrossTxClaimInfo(msg.MsgHash, h.db),
}
updateCrossTxHash(msg.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
}
return txHistories, total, nil
}
func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {
txHistories := make([]*TxHistoryInfo, 0)
for _, hash := range hashes {
l1result, err := h.db.GetL1CrossMsgByHash(common.HexToHash(hash))
if err != nil {
return nil, err
}
if l1result != nil {
txHistory := &TxHistoryInfo{
Hash: l1result.Layer1Hash,
Amount: l1result.Amount,
To: l1result.Target,
IsL1: true,
BlockNumber: l1result.Height,
BlockTimestamp: l1result.Timestamp,
CreatedAt: l1result.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
}
updateCrossTxHash(l1result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
l2result, err := h.db.GetL2CrossMsgByHash(common.HexToHash(hash))
if err != nil {
return nil, err
}
if l2result != nil {
txHistory := &TxHistoryInfo{
Hash: l2result.Layer2Hash,
Amount: l2result.Amount,
To: l2result.Target,
IsL1: false,
BlockNumber: l2result.Height,
BlockTimestamp: l2result.Timestamp,
CreatedAt: l2result.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
ClaimInfo: GetCrossTxClaimInfo(l2result.MsgHash, h.db),
}
updateCrossTxHash(l2result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
}
return txHistories, nil
}

View File

@@ -0,0 +1,103 @@
package utils
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"gorm.io/gorm/utils"
"bridge-history-api/config"
)
type gormLogger struct {
gethLogger log.Logger
}
func (g *gormLogger) LogMode(level logger.LogLevel) logger.Interface {
return g
}
func (g *gormLogger) Info(_ context.Context, msg string, data ...interface{}) {
infoMsg := fmt.Sprintf(msg, data...)
g.gethLogger.Info("gorm", "info message", infoMsg)
}
func (g *gormLogger) Warn(_ context.Context, msg string, data ...interface{}) {
warnMsg := fmt.Sprintf(msg, data...)
g.gethLogger.Warn("gorm", "warn message", warnMsg)
}
func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
errMsg := fmt.Sprintf(msg, data...)
g.gethLogger.Error("gorm", "err message", errMsg)
}
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}
// InitDB init the db handler
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
tmpGormLogger := gormLogger{
gethLogger: log.Root(),
}
db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
Logger: &tmpGormLogger,
NowFunc: func() time.Time {
// Why force time.Now to UTC:
// without this, inserted timestamps carry the local timezone, e.g. 2023-07-18 18:24:00 CST+8,
// but postgres stores them as 2023-07-18 18:24:00 UTC+0, so the timezone ends up wrong.
// With a mysql DSN (user:pass@tcp(127.0.0.1:3306)/dbname?charset=utf8mb4&parseTime=True&loc=Local) the
// timezone could be set via loc=Local, but a postgres DSN has no loc option, so we override gorm's NowFunc instead.
t, err := nowUTC()
if err != nil {
log.Error("Can not get UTC time: ", "err", err)
}
return t
},
})
if err != nil {
return nil, err
}
sqlDB, err := db.DB()
if err != nil {
return nil, err
}
sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)
if err = sqlDB.Ping(); err != nil {
return nil, err
}
return db, nil
}
// CloseDB closes the db handler. Note that the db handler should only be closed when the program exits.
func CloseDB(db *gorm.DB) error {
sqlDB, err := db.DB()
if err != nil {
return err
}
if err := sqlDB.Close(); err != nil {
return err
}
return nil
}
// nowUTC get the utc time.Now
func nowUTC() (time.Time, error) {
utc, err := time.LoadLocation("")
if err != nil {
return time.Time{}, err
}
return time.Now().In(utc), nil
}

View File

@@ -2,27 +2,19 @@ package utils
import ( import (
"context" "context"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
backendabi "bridge-history-api/abi" backendabi "bridge-history-api/abi"
"bridge-history-api/db/orm" "bridge-history-api/orm"
) )
type MsgHashWrapper struct { // CachedParsedTxCalldata store parsed batch infos
MsgHash common.Hash
TxHash common.Hash
}
type L2SentMsgWrapper struct {
L2SentMsg *orm.L2SentMsg
TxHash common.Hash
}
type CachedParsedTxCalldata struct { type CachedParsedTxCalldata struct {
CallDataIndex uint64 CallDataIndex uint64
BatchIndices []uint64 BatchIndices []uint64
@@ -30,13 +22,14 @@ type CachedParsedTxCalldata struct {
EndBlocks []uint64 EndBlocks []uint64
} }
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, error) { // ParseBackendL1EventLogs parses L1 watched events
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l1CrossMsg []*orm.CrossMsg var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg var relayedMsgs []*orm.RelayedMsg
var msgHashes []MsgHashWrapper var msgHash string
for _, vlog := range logs { for _, vlog := range logs {
switch vlog.Topics[0] { switch vlog.Topics[0] {
case backendabi.L1DepositETHSig: case backendabi.L1DepositETHSig:
@@ -44,7 +37,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog) err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err) log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -53,13 +46,15 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Amount: event.Amount.String(), Amount: event.Amount.String(),
Asset: int(orm.ETH), Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC20Sig: case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{} event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog) err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err) log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -70,13 +65,15 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC721Sig: case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{} event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog) err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err) log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -86,14 +83,16 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()}, TokenIDs: event.TokenID.String(),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
}) })
case backendabi.L1DepositERC1155Sig: case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{} event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog) err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err) log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{ l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
@@ -103,26 +102,65 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
Layer1Hash: vlog.TxHash.Hex(), Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()}, TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(), Amount: event.Amount.String(),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
}) })
case backendabi.L1SentMessageEventSignature: case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{} event := backendabi.L1SentMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog) err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err) log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message) // since every deposit event will emit after a sent event, so can use this msg_hash as next withdraw event's msg_hash
msgHashes = append(msgHashes, MsgHashWrapper{ msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex()
MsgHash: msgHash, case backendabi.L1BatchDepositERC721Sig:
TxHash: vlog.TxHash}) event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "BatchDepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
})
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
})
case backendabi.L1RelayedMessageEventSignature: case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{} event := backendabi.L1RelayedMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog) err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err) log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err return l1CrossMsg, relayedMsgs, err
} }
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{ relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(), MsgHash: event.MessageHash.String(),
@@ -133,17 +171,18 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrappe
} }
} }
return l1CrossMsg, msgHashes, relayedMsgs, nil return l1CrossMsg, relayedMsgs, nil
} }
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []L2SentMsgWrapper, error) { // ParseBackendL2EventLogs parses L2 watched events
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
// Need use contract abi to parse event Log // Need use contract abi to parse event Log
// Can only be tested after we have our contracts set up // Can only be tested after we have our contracts set up
var l2CrossMsg []*orm.CrossMsg var l2CrossMsg []*orm.CrossMsg
// this is use to confirm finalized l1 msg // this is use to confirm finalized l1 msg
var relayedMsgs []*orm.RelayedMsg var relayedMsgs []*orm.RelayedMsg
var l2SentMsg []L2SentMsgWrapper var l2SentMsgs []*orm.L2SentMsg
for _, vlog := range logs { for _, vlog := range logs {
switch vlog.Topics[0] { switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig: case backendabi.L2WithdrawETHSig:
@@ -151,8 +190,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog) err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err) log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -160,14 +200,17 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Amount: event.Amount.String(), Amount: event.Amount.String(),
Asset: int(orm.ETH), Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
MsgType: int(orm.Layer2Msg),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2WithdrawERC20Sig: case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{} event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog) err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err) log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -177,14 +220,17 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
MsgType: int(orm.Layer2Msg),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2WithdrawERC721Sig: case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{} event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog) err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err) log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -193,15 +239,18 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()}, TokenIDs: event.TokenID.String(),
MsgType: int(orm.Layer2Msg),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2WithdrawERC1155Sig: case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{} event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog) err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err) log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{ l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber, Height: vlog.BlockNumber,
Sender: event.From.String(), Sender: event.From.String(),
@@ -210,36 +259,78 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(), Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(), Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(), Layer2Token: event.L2Token.Hex(),
TokenIDs: []string{event.TokenID.String()}, TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(), Amount: event.Amount.String(),
MsgType: int(orm.Layer2Msg),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgType: int(orm.Layer2Msg),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgType: int(orm.Layer2Msg),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
}) })
case backendabi.L2SentMessageEventSignature: case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{} event := backendabi.L2SentMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog) err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err) log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
// since every withdraw event will emit after a sent event, so can use this msg_hash as next withdraw event's msg_hash
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message) msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
l2SentMsg = append(l2SentMsg, l2SentMsgs = append(l2SentMsgs,
L2SentMsgWrapper{ &orm.L2SentMsg{
TxHash: vlog.TxHash, Sender: event.Sender.Hex(),
L2SentMsg: &orm.L2SentMsg{ TxHash: vlog.TxHash.Hex(),
Sender: event.Sender.Hex(), Target: event.Target.Hex(),
Target: event.Target.Hex(), Value: event.Value.String(),
Value: event.Value.String(), MsgHash: msgHash.Hex(),
MsgHash: msgHash.Hex(), Height: vlog.BlockNumber,
Height: vlog.BlockNumber, Nonce: event.MessageNonce.Uint64(),
Nonce: event.MessageNonce.Uint64(), MsgData: hexutil.Encode(event.Message),
MsgData: hexutil.Encode(event.Message),
},
}) })
case backendabi.L2RelayedMessageEventSignature: case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{} event := backendabi.L2RelayedMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog) err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil { if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err) log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsg, err return l2CrossMsg, relayedMsgs, l2SentMsgs, err
} }
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{ relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(), MsgHash: event.MessageHash.String(),
@@ -249,12 +340,12 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
} }
} }
return l2CrossMsg, relayedMsgs, l2SentMsg, nil return l2CrossMsg, relayedMsgs, l2SentMsgs, nil
} }
// ParseBatchInfoFromScrollChain parses ScrollChain events
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) { func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
var rollupBatches []*orm.RollupBatch var rollupBatches []*orm.RollupBatch
cache := make(map[string]CachedParsedTxCalldata)
for _, vlog := range logs { for _, vlog := range logs {
switch vlog.Topics[0] { switch vlog.Topics[0] {
case backendabi.L1CommitBatchEventSignature: case backendabi.L1CommitBatchEventSignature:
@@ -264,42 +355,22 @@ func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client
log.Warn("Failed to unpack CommitBatch event", "err", err) log.Warn("Failed to unpack CommitBatch event", "err", err)
return rollupBatches, err return rollupBatches, err
} }
if _, ok := cache[vlog.TxHash.Hex()]; ok {
c := cache[vlog.TxHash.Hex()]
c.CallDataIndex++
rollupBatches = append(rollupBatches, &orm.RollupBatch{
CommitHeight: vlog.BlockNumber,
BatchIndex: c.BatchIndices[c.CallDataIndex],
BatchHash: event.BatchHash.Hex(),
StartBlockNumber: c.StartBlocks[c.CallDataIndex],
EndBlockNumber: c.EndBlocks[c.CallDataIndex],
})
cache[vlog.TxHash.Hex()] = c
continue
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash) commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending { if err != nil || isPending {
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err) log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
return rollupBatches, err return rollupBatches, err
} }
indices, startBlocks, endBlocks, err := GetBatchRangeFromCalldataV1(commitTx.Data()) index, startBlock, endBlock, err := GetBatchRangeFromCalldataV2(commitTx.Data())
if err != nil { if err != nil {
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().Hex(), "height", vlog.BlockNumber) log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().Hex(), "height", vlog.BlockNumber)
return rollupBatches, err return rollupBatches, err
} }
cache[vlog.TxHash.Hex()] = CachedParsedTxCalldata{
CallDataIndex: 0,
BatchIndices: indices,
StartBlocks: startBlocks,
EndBlocks: endBlocks,
}
rollupBatches = append(rollupBatches, &orm.RollupBatch{ rollupBatches = append(rollupBatches, &orm.RollupBatch{
CommitHeight: vlog.BlockNumber, CommitHeight: vlog.BlockNumber,
BatchIndex: indices[0], BatchIndex: index,
BatchHash: event.BatchHash.Hex(), BatchHash: event.BatchHash.Hex(),
StartBlockNumber: startBlocks[0], StartBlockNumber: startBlock,
EndBlockNumber: endBlocks[0], EndBlockNumber: endBlock,
}) })
default: default:
@@ -308,3 +379,13 @@ func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client
} }
return rollupBatches, nil return rollupBatches, nil
} }
func convertBigIntArrayToString(array []*big.Int) string {
stringArray := make([]string, len(array))
for i, num := range array {
stringArray[i] = num.String()
}
result := strings.Join(stringArray, ", ")
return result
}
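The helper above renders a slice of *big.Int token IDs or amounts as a single comma-separated string before they are stored on the cross message. A minimal, standalone usage sketch (the demo values are illustrative only; the function body mirrors the helper added in this diff):

package main

import (
	"fmt"
	"math/big"
	"strings"
)

// Mirrors the convertBigIntArrayToString helper added above.
func convertBigIntArrayToString(array []*big.Int) string {
	stringArray := make([]string, len(array))
	for i, num := range array {
		stringArray[i] = num.String()
	}
	return strings.Join(stringArray, ", ")
}

func main() {
	tokenIDs := []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
	fmt.Println(convertBigIntArrayToString(tokenIDs)) // Output: 1, 2, 3
}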


@@ -22,6 +22,7 @@ func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...))) return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
} }
// GetSafeBlockNumber gets the safe block number, which is the current block number minus the number of confirmations
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) { func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
number, err := client.BlockNumber(ctx) number, err := client.BlockNumber(ctx)
if err != nil || number <= confirmations { if err != nil || number <= confirmations {
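The doc comment added above describes the calculation; only the first lines of the function appear in this hunk, so the following is an assumed standalone sketch of the arithmetic (the zero return for the under-confirmed case is a guess, not taken from the diff):

// safeBlockNumber is a hypothetical standalone version of the computation.
func safeBlockNumber(latest, confirmations uint64) uint64 {
	if latest <= confirmations {
		// Assumption: with fewer than `confirmations` blocks, report 0 as the safe height.
		return 0
	}
	return latest - confirmations
}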


@@ -11,13 +11,13 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -29,18 +29,18 @@ func init() {
app.Name = "event-watcher" app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher" app.Usage = "The Scroll Event Watcher"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return utils.LogSetup(ctx)
} }
// Register `event-watcher-test` app for integration-test. // Register `event-watcher-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.EventWatcherApp) utils.RegisterSimulation(app, utils.EventWatcherApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name) cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +48,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = utils.CloseDB(db); err != nil { if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -75,14 +75,14 @@ func action(ctx *cli.Context) error {
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db) l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db) l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
go cutils.Loop(subCtx, 10*time.Second, func() { go utils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil { if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr) log.Error("Failed to fetch bridge contract", "err", loopErr)
} }
}) })
// Start l2 watcher process // Start l2 watcher process
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent) go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions // Finish start all l2 functions
log.Info("Start event-watcher successfully") log.Info("Start event-watcher successfully")


@@ -11,14 +11,15 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils" butils "scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -31,31 +32,31 @@ func init() {
app.Usage = "The Scroll Gas Oracle" app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle." app.Description = "Scroll Gas Oracle."
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return utils.LogSetup(ctx)
} }
// Register `gas-oracle-test` app for integration-test. // Register `gas-oracle-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.GasOracleApp) utils.RegisterSimulation(app, utils.GasOracleApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name) cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
} }
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = utils.CloseDB(db); err != nil { if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -76,7 +77,8 @@ func action(ctx *cli.Context) error {
return err return err
} }
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db) l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig) l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil { if err != nil {
@@ -89,8 +91,8 @@ func action(ctx *cli.Context) error {
return err return err
} }
// Start l1 watcher process // Start l1 watcher process
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) { go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations) number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil { if loopErr != nil {
log.Error("failed to get block number", "err", loopErr) log.Error("failed to get block number", "err", loopErr)
return return
@@ -102,8 +104,8 @@ func action(ctx *cli.Context) error {
}) })
// Start l1relayer process // Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle) go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle) go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions // Finish start all message relayer functions
log.Info("Start gas-oracle successfully") log.Info("Start gas-oracle successfully")


@@ -10,13 +10,13 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -29,18 +29,18 @@ func init() {
app.Usage = "The Scroll Message Relayer" app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1." app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return utils.LogSetup(ctx)
} }
// Register `message-relayer-test` app for integration-test. // Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp) utils.RegisterSimulation(app, utils.MessageRelayerApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name) cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +48,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = utils.CloseDB(db); err != nil { if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -69,7 +69,7 @@ func action(ctx *cli.Context) error {
} }
// Start l1relayer process // Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents) go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Finish start all message relayer functions // Finish start all message relayer functions
log.Info("Start message-relayer successfully") log.Info("Start message-relayer successfully")


@@ -11,14 +11,15 @@ import (
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics" "scroll-tech/common/metrics"
cutils "scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/common/version" "scroll-tech/common/version"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils" butils "scroll-tech/bridge/internal/utils"
) )
var app *cli.App var app *cli.App
@@ -30,19 +31,19 @@ func init() {
app.Name = "rollup-relayer" app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer" app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.RollupRelayerFlags...) app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
app.Commands = []*cli.Command{} app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx) return utils.LogSetup(ctx)
} }
// Register `rollup-relayer-test` app for integration-test. // Register `rollup-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.RollupRelayerApp) utils.RegisterSimulation(app, utils.RollupRelayerApp)
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
// Load config file. // Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name) cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile) cfg, err := config.NewConfig(cfgFile)
if err != nil { if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err) log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -50,13 +51,13 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context) subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection // Init db connection
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
log.Crit("failed to init db connection", "err", err) log.Crit("failed to init db connection", "err", err)
} }
defer func() { defer func() {
cancel() cancel()
if err = utils.CloseDB(db); err != nil { if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err) log.Error("can not close ormFactory", "error", err)
} }
}() }()
@@ -71,7 +72,7 @@ func action(ctx *cli.Context) error {
return err return err
} }
initGenesis := ctx.Bool(cutils.ImportGenesisFlag.Name) initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis) l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
if err != nil { if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err) log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
@@ -90,11 +91,12 @@ func action(ctx *cli.Context) error {
return err return err
} }
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db) l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
// Watcher loop to fetch missing blocks // Watcher loop to fetch missing blocks
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) { go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations) number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil { if loopErr != nil {
log.Error("failed to get block number", "err", loopErr) log.Error("failed to get block number", "err", loopErr)
return return
@@ -102,13 +104,13 @@ func action(ctx *cli.Context) error {
l2watcher.TryFetchRunningMissingBlocks(number) l2watcher.TryFetchRunningMissingBlocks(number)
}) })
go cutils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk) go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
go cutils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch) go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches) go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches) go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions. // Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully") log.Info("Start rollup-relayer successfully")


@@ -78,14 +78,16 @@
"max_l1_commit_gas_per_chunk": 11234567, "max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345, "max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234, "min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300 "chunk_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
}, },
"batch_proposer_config": { "batch_proposer_config": {
"max_chunk_num_per_batch": 112, "max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567, "max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345, "max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11, "min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300 "batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
} }
}, },
"db_config": { "db_config": {


@@ -4,18 +4,14 @@ go 1.19
require ( require (
github.com/agiledragon/gomonkey/v2 v2.9.0 github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/smartystreets/goconvey v1.8.0 github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.2 github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.1.0 golang.org/x/sync v0.3.0
gorm.io/driver/postgres v1.5.0 gorm.io/gorm v1.25.2
gorm.io/gorm v1.25.1
) )
require ( require (
@@ -25,7 +21,6 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect github.com/google/uuid v1.3.0 // indirect
@@ -34,18 +29,15 @@ require (
github.com/holiman/uint256 v1.2.2 // indirect github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect github.com/jinzhu/now v1.1.5 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect github.com/rjeczalik/notify v0.9.1 // indirect
@@ -60,11 +52,10 @@ require (
github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.10.0 // indirect golang.org/x/crypto v0.11.0 // indirect
golang.org/x/sys v0.9.0 // indirect golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )


@@ -18,7 +18,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
@@ -29,9 +28,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -55,13 +51,6 @@ github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixH
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -69,16 +58,11 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -86,20 +70,15 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -113,16 +92,12 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -143,16 +118,8 @@ github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
@@ -160,93 +127,48 @@ github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYm
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U= gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A= gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=


@@ -4,13 +4,15 @@ import (
"encoding/json" "encoding/json"
"os" "os"
"path/filepath" "path/filepath"
"scroll-tech/common/database"
) )
// Config load configuration items. // Config load configuration items.
type Config struct { type Config struct {
L1Config *L1Config `json:"l1_config"` L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"` L2Config *L2Config `json:"l2_config"`
DBConfig *DBConfig `json:"db_config"` DBConfig *database.Config `json:"db_config"`
} }
// NewConfig returns a new instance of Config. // NewConfig returns a new instance of Config.
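With db_config now typed as *database.Config, the entrypoints load the file via config.NewConfig and hand the field straight to database.InitDB. A condensed sketch of that flow, using only calls that appear in this change set (the config file path is illustrative):

// openDB loads the bridge config and opens the shared gorm handle.
func openDB(cfgFile string) (*gorm.DB, error) {
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		return nil, err
	}
	return database.InitDB(cfg.DBConfig)
}

// Callers release it with database.CloseDB(db), as the cmd packages above do.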


@@ -28,19 +28,21 @@ type L2Config struct {
 // ChunkProposerConfig loads chunk_proposer configuration items.
 type ChunkProposerConfig struct {
 	MaxTxGasPerChunk                uint64  `json:"max_tx_gas_per_chunk"`
 	MaxL2TxNumPerChunk              uint64  `json:"max_l2_tx_num_per_chunk"`
 	MaxL1CommitGasPerChunk          uint64  `json:"max_l1_commit_gas_per_chunk"`
 	MaxL1CommitCalldataSizePerChunk uint64  `json:"max_l1_commit_calldata_size_per_chunk"`
 	MinL1CommitCalldataSizePerChunk uint64  `json:"min_l1_commit_calldata_size_per_chunk"`
 	ChunkTimeoutSec                 uint64  `json:"chunk_timeout_sec"`
+	GasCostIncreaseMultiplier       float64 `json:"gas_cost_increase_multiplier"`
 }

 // BatchProposerConfig loads batch_proposer configuration items.
 type BatchProposerConfig struct {
 	MaxChunkNumPerBatch             uint64  `json:"max_chunk_num_per_batch"`
 	MaxL1CommitGasPerBatch          uint64  `json:"max_l1_commit_gas_per_batch"`
-	MaxL1CommitCalldataSizePerBatch uint64  `json:"max_l1_commit_calldata_size_per_batch"`
+	MaxL1CommitCalldataSizePerBatch uint32  `json:"max_l1_commit_calldata_size_per_batch"`
 	MinChunkNumPerBatch             uint64  `json:"min_chunk_num_per_batch"`
 	BatchTimeoutSec                 uint64  `json:"batch_timeout_sec"`
+	GasCostIncreaseMultiplier       float64 `json:"gas_cost_increase_multiplier"`
 }
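A hypothetical instantiation, only to show where the new `GasCostIncreaseMultiplier` field and the now-`uint32` calldata limit sit; every value below is illustrative and not a default from this repository:

// Illustrative values only, not the repo's defaults.
batchCfg := &config.BatchProposerConfig{
	MaxChunkNumPerBatch:             15,
	MaxL1CommitGasPerBatch:          5_000_000,
	MaxL1CommitCalldataSizePerBatch: 120_000, // uint32 after this change
	MinChunkNumPerBatch:             1,
	BatchTimeoutSec:                 300,
	GasCostIncreaseMultiplier:       1.2, // inflates the gas estimate before the cap check
}
_ = batchCfg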

View File

@@ -109,7 +109,7 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
 	for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
 		priv, err := crypto.ToECDSA(common.FromHex(privStr))
 		if err != nil {
-			return fmt.Errorf("incorrect roller_private_key format, err: %v", err)
+			return fmt.Errorf("incorrect prover_private_key format, err: %v", err)
 		}
 		r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
 	}

View File

@@ -52,7 +52,7 @@ type Layer1Relayer struct {
 	gasPriceDiff uint64

 	l1MessageOrm *orm.L1Message
-	l1Block      *orm.L1Block
+	l1BlockOrm   *orm.L1Block
 }

 // NewLayer1Relayer will return a new instance of Layer1RelayerClient
@@ -90,7 +90,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
 	l1Relayer := &Layer1Relayer{
 		ctx:          ctx,
 		l1MessageOrm: orm.NewL1Message(db),
-		l1Block:      orm.NewL1Block(db),
+		l1BlockOrm:   orm.NewL1Block(db),

 		messageSender:  messageSender,
 		l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
@@ -159,13 +159,13 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
 // ProcessGasPriceOracle imports gas price to layer2
 func (r *Layer1Relayer) ProcessGasPriceOracle() {
-	latestBlockHeight, err := r.l1Block.GetLatestL1BlockHeight()
+	latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
 	if err != nil {
 		log.Warn("Failed to fetch latest L1 block height from db", "err", err)
 		return
 	}

-	blocks, err := r.l1Block.GetL1Blocks(map[string]interface{}{
+	blocks, err := r.l1BlockOrm.GetL1Blocks(r.ctx, map[string]interface{}{
 		"number": latestBlockHeight,
 	})
 	if err != nil {
@@ -197,7 +197,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
 			return
 		}

-		err = r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
+		err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
 		if err != nil {
 			log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
 			return
@@ -232,14 +232,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
 		case cfm := <-r.gasOracleSender.ConfirmChan():
 			if !cfm.IsSuccessful {
 				// @discuss: maybe make it pending again?
-				err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
+				err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
 				if err != nil {
 					log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
 				}
 				log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
 			} else {
 				// @todo handle db error
-				err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
+				err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
 				if err != nil {
 					log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
 				}
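Condensing the gas-oracle path above: a block enters the table as GasOraclePending (see the watcher change further down), is marked GasOracleImporting when the oracle transaction is sent, and ends as GasOracleImported or GasOracleFailed depending on the confirmation. A non-compilable condensation of the terminal update, using only calls visible in this diff:

// Sketch of the confirmation handling above (error handling elided):
status := types.GasOracleImported
if !cfm.IsSuccessful {
	status = types.GasOracleFailed
}
_ = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, status, cfm.TxHash.String())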

View File

@@ -12,13 +12,14 @@ import (
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"

+	"scroll-tech/common/database"
 	"scroll-tech/common/types"
 	"scroll-tech/common/utils"
+	"scroll-tech/database/migrate"

 	"scroll-tech/bridge/internal/controller/sender"
 	"scroll-tech/bridge/internal/orm"
-	"scroll-tech/bridge/internal/orm/migrate"
-	bridgeUtils "scroll-tech/bridge/internal/utils"
 )

 var (
@@ -49,7 +50,7 @@ var (
 )

 func setupL1RelayerDB(t *testing.T) *gorm.DB {
-	db, err := bridgeUtils.InitDB(cfg.DBConfig)
+	db, err := database.InitDB(cfg.DBConfig)
 	assert.NoError(t, err)
 	sqlDB, err := db.DB()
 	assert.NoError(t, err)
@@ -60,7 +61,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
 // testCreateNewRelayer test create new relayer instance and stop
 func testCreateNewL1Relayer(t *testing.T) {
 	db := setupL1RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
@@ -68,7 +69,7 @@ func testCreateNewL1Relayer(t *testing.T) {
 func testL1RelayerProcessSaveEvents(t *testing.T) {
 	db := setupL1RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	l1MessageOrm := orm.NewL1Message(db)
 	l1Cfg := cfg.L1Config
 	relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
@@ -86,7 +87,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {
 func testL1RelayerMsgConfirm(t *testing.T) {
 	db := setupL1RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	l1MessageOrm := orm.NewL1Message(db)
 	l1Messages := []*orm.L1Message{
 		{MsgHash: "msg-1", QueueIndex: 0},
@@ -123,12 +124,12 @@ func testL1RelayerMsgConfirm(t *testing.T) {
 func testL1RelayerGasOracleConfirm(t *testing.T) {
 	db := setupL1RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	l1BlockOrm := orm.NewL1Block(db)
 	l1Block := []orm.L1Block{
-		{Hash: "gas-oracle-1", Number: 0},
-		{Hash: "gas-oracle-2", Number: 1},
+		{Hash: "gas-oracle-1", Number: 0, GasOracleStatus: int16(types.GasOraclePending)},
+		{Hash: "gas-oracle-2", Number: 1, GasOracleStatus: int16(types.GasOraclePending)},
 	}
 	// Insert test data.
 	assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
@@ -152,8 +153,8 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
 	// Check the database for the updated status using TryTimes.
 	ok := utils.TryTimes(5, func() bool {
-		msg1, err1 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-1"})
-		msg2, err2 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-2"})
+		msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"})
+		msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"})
 		return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
 			err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
 	})
@@ -162,7 +163,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
 func testL1RelayerProcessGasPriceOracle(t *testing.T) {
 	db := setupL1RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	l1Cfg := cfg.L1Config

 	ctx, cancel := context.WithCancel(context.Background())
@@ -174,28 +175,28 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
 	var l1BlockOrm *orm.L1Block
 	convey.Convey("GetLatestL1BlockHeight failure", t, func() {
 		targetErr := errors.New("GetLatestL1BlockHeight error")
-		patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
+		patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
 			return 0, targetErr
 		})
 		defer patchGuard.Reset()
 		l1Relayer.ProcessGasPriceOracle()
 	})

-	patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
+	patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
 		return 100, nil
 	})
 	defer patchGuard.Reset()

 	convey.Convey("GetL1Blocks failure", t, func() {
 		targetErr := errors.New("GetL1Blocks error")
-		patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
+		patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
 			return nil, targetErr
 		})
 		l1Relayer.ProcessGasPriceOracle()
 	})

 	convey.Convey("Block not exist", t, func() {
-		patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
+		patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
 			tmpInfo := []orm.L1Block{
 				{Hash: "gas-oracle-1", Number: 0},
 				{Hash: "gas-oracle-2", Number: 1},
@@ -205,12 +206,12 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
 		l1Relayer.ProcessGasPriceOracle()
 	})

-	patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
+	patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
 		tmpInfo := []orm.L1Block{
 			{
 				Hash:   "gas-oracle-1",
 				Number: 0,
-				GasOracleStatus: int(types.GasOraclePending),
+				GasOracleStatus: int16(types.GasOraclePending),
 			},
 		}
 		return tmpInfo, nil

View File

@@ -22,7 +22,6 @@ import (
 	"scroll-tech/bridge/internal/config"
 	"scroll-tech/bridge/internal/controller/sender"
 	"scroll-tech/bridge/internal/orm"
-	bridgeTypes "scroll-tech/bridge/internal/types"
 )

 var (
@@ -30,7 +29,6 @@ var (
 	bridgeL2BatchesCommittedTotalCounter          = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
 	bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
 	bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
-	bridgeL2BatchesSkippedTotalCounter            = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
 )

 // Layer2Relayer is responsible for
@@ -171,11 +169,11 @@ func (r *Layer2Relayer) initializeGenesis() error {
 	log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())

-	chunk := &bridgeTypes.Chunk{
-		Blocks: []*bridgeTypes.WrappedBlock{{
+	chunk := &types.Chunk{
+		Blocks: []*types.WrappedBlock{{
 			Header:       genesis,
 			Transactions: nil,
-			WithdrawTrieRoot: common.Hash{},
+			WithdrawRoot: common.Hash{},
 		}},
 	}
@@ -191,7 +189,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
 		}

 		var batch *orm.Batch
-		batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*bridgeTypes.Chunk{chunk}, dbTX)
+		batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX)
 		if err != nil {
 			return fmt.Errorf("failed to insert batch: %v", err)
 		}
@@ -319,7 +317,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 	}
 	for _, batch := range pendingBatches {
 		// get current header and parent header.
-		currentBatchHeader, err := bridgeTypes.DecodeBatchHeader(batch.BatchHeader)
+		currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
 		if err != nil {
 			log.Error("Failed to decode batch header", "index", batch.Index, "error", err)
 			return
@@ -346,7 +344,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 		encodedChunks := make([][]byte, len(dbChunks))
 		for i, c := range dbChunks {
-			var wrappedBlocks []*bridgeTypes.WrappedBlock
+			var wrappedBlocks []*types.WrappedBlock
 			wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
 			if err != nil {
 				log.Error("Failed to fetch wrapped blocks",
@@ -354,7 +352,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 					"end number", c.EndBlockNumber, "error", err)
 				return
 			}
-			chunk := &bridgeTypes.Chunk{
+			chunk := &types.Chunk{
 				Blocks: wrappedBlocks,
 			}
 			var chunkBytes []byte
@@ -395,15 +393,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 // ProcessCommittedBatches submit proof to layer 1 rollup contract
 func (r *Layer2Relayer) ProcessCommittedBatches() {
-	// set skipped batches in a single db operation
-	if count, err := r.batchOrm.UpdateSkippedBatches(r.ctx); err != nil {
-		log.Error("UpdateSkippedBatches failed", "err", err)
-		// continue anyway
-	} else if count > 0 {
-		bridgeL2BatchesSkippedTotalCounter.Inc(int64(count))
-		log.Info("Skipping batches", "count", count)
-	}
 	// retrieves the earliest batch whose rollup status is 'committed'
 	fields := map[string]interface{}{
 		"rollup_status": types.RollupCommitted,
@@ -428,14 +417,9 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 		// The proof for this block is not ready yet.
 		return
 	case types.ProvingTaskProved:
-		// It's an intermediate state. The roller manager received the proof but has not verified
+		// It's an intermediate state. The prover manager received the proof but has not verified
 		// the proof yet. We don't roll up the proof until it's verified.
 		return
-	case types.ProvingTaskFailed, types.ProvingTaskSkipped:
-		// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
-		if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
-			log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
-		}
 	case types.ProvingTaskVerified:
 		log.Info("Start to roll up zk proof", "hash", hash)
 		success := false
@@ -455,8 +439,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 		defer func() {
 			// TODO: need to revisit this and have a more fine-grained error handling
 			if !success {
-				log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
-				if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
+				log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
+				if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
 					log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
 				}
 			}
@@ -511,9 +495,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 		r.processingFinalization.Store(txID, hash)
 	default:
-		log.Error("encounter unreachable case in ProcessCommittedBatches",
-			"block_status", status,
-		)
+		log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
 	}
 }

View File

@@ -12,19 +12,19 @@ import (
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"

+	"scroll-tech/common/database"
 	"scroll-tech/common/types"
 	"scroll-tech/common/types/message"
 	"scroll-tech/common/utils"
+	"scroll-tech/database/migrate"

 	"scroll-tech/bridge/internal/controller/sender"
 	"scroll-tech/bridge/internal/orm"
-	"scroll-tech/bridge/internal/orm/migrate"
-	bridgeTypes "scroll-tech/bridge/internal/types"
-	bridgeUtils "scroll-tech/bridge/internal/utils"
 )

 func setupL2RelayerDB(t *testing.T) *gorm.DB {
-	db, err := bridgeUtils.InitDB(cfg.DBConfig)
+	db, err := database.InitDB(cfg.DBConfig)
 	assert.NoError(t, err)
 	sqlDB, err := db.DB()
 	assert.NoError(t, err)
@@ -34,7 +34,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
 func testCreateNewRelayer(t *testing.T) {
 	db := setupL2RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
@@ -42,14 +42,14 @@ func testCreateNewRelayer(t *testing.T) {
 func testL2RelayerProcessPendingBatches(t *testing.T) {
 	db := setupL2RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	l2Cfg := cfg.L2Config
 	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)

 	l2BlockOrm := orm.NewL2Block(db)
-	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
+	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
 	assert.NoError(t, err)
 	chunkOrm := orm.NewChunk(db)
 	dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
@@ -57,7 +57,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
 	dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
 	assert.NoError(t, err)
 	batchOrm := orm.NewBatch(db)
-	batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*bridgeTypes.Chunk{chunk1, chunk2})
+	batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2})
 	assert.NoError(t, err)

 	relayer.ProcessPendingBatches()
@@ -70,13 +70,13 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
 func testL2RelayerProcessCommittedBatches(t *testing.T) {
 	db := setupL2RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	l2Cfg := cfg.L2Config
 	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
 	assert.NoError(t, err)
 	batchOrm := orm.NewBatch(db)
-	batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
+	batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
 	assert.NoError(t, err)

 	err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
@@ -90,13 +90,12 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
 	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(statuses))
-	assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
+	assert.Equal(t, types.RollupFinalizeFailed, statuses[0])

 	err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
 	assert.NoError(t, err)
-	proof := &message.AggProof{
+	proof := &message.BatchProof{
 		Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
 	}
 	err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
 	assert.NoError(t, err)
@@ -108,70 +107,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
 	assert.Equal(t, types.RollupFinalizing, statuses[0])
 }

-func testL2RelayerSkipBatches(t *testing.T) {
-	db := setupL2RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
-	l2Cfg := cfg.L2Config
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
-	assert.NoError(t, err)
-	batchOrm := orm.NewBatch(db)
-
-	createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus) string {
-		batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
-		assert.NoError(t, err)
-		err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, rollupStatus)
-		assert.NoError(t, err)
-		proof := &message.AggProof{
-			Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-			FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		}
-		err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
-		assert.NoError(t, err)
-		err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, provingStatus)
-		assert.NoError(t, err)
-		return batch.Hash
-	}
-
-	skipped := []string{
-		createBatch(types.RollupCommitted, types.ProvingTaskSkipped),
-		createBatch(types.RollupCommitted, types.ProvingTaskFailed),
-	}
-
-	notSkipped := []string{
-		createBatch(types.RollupPending, types.ProvingTaskSkipped),
-		createBatch(types.RollupCommitting, types.ProvingTaskSkipped),
-		createBatch(types.RollupFinalizing, types.ProvingTaskSkipped),
-		createBatch(types.RollupFinalized, types.ProvingTaskSkipped),
-		createBatch(types.RollupPending, types.ProvingTaskFailed),
-		createBatch(types.RollupCommitting, types.ProvingTaskFailed),
-		createBatch(types.RollupFinalizing, types.ProvingTaskFailed),
-		createBatch(types.RollupFinalized, types.ProvingTaskFailed),
-		createBatch(types.RollupCommitted, types.ProvingTaskVerified),
-	}
-
-	relayer.ProcessCommittedBatches()
-
-	for _, id := range skipped {
-		statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
-		assert.NoError(t, err)
-		assert.Equal(t, 1, len(statuses))
-		assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
-	}
-
-	for _, id := range notSkipped {
-		statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{id})
-		assert.NoError(t, err)
-		assert.Equal(t, 1, len(statuses))
-		assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0])
-	}
-}
-
 func testL2RelayerRollupConfirm(t *testing.T) {
 	db := setupL2RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)

 	// Create and set up the Layer2 Relayer.
 	l2Cfg := cfg.L2Config
@@ -187,7 +125,7 @@ func testL2RelayerRollupConfirm(t *testing.T) {
 	batchOrm := orm.NewBatch(db)
 	batchHashes := make([]string, len(processingKeys))
 	for i := range batchHashes {
-		batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk1, chunk2})
+		batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
 		assert.NoError(t, err)
 		batchHashes[i] = batch.Hash
 	}
@@ -232,13 +170,13 @@ func testL2RelayerRollupConfirm(t *testing.T) {
 func testL2RelayerGasOracleConfirm(t *testing.T) {
 	db := setupL2RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)

 	batchOrm := orm.NewBatch(db)
-	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
+	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
 	assert.NoError(t, err)

-	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
+	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
 	assert.NoError(t, err)

 	// Create and set up the Layer2 Relayer.
@@ -281,7 +219,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
 func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
 	db := setupL2RelayerDB(t)
-	defer bridgeUtils.CloseDB(db)
+	defer database.CloseDB(db)
 	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
 	assert.NoError(t, err)

View File

@@ -9,10 +9,11 @@ import (
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/stretchr/testify/assert"

+	"scroll-tech/common/database"
 	"scroll-tech/common/docker"
+	"scroll-tech/common/types"

 	"scroll-tech/bridge/internal/config"
-	bridgeTypes "scroll-tech/bridge/internal/types"
 )

 var (
@@ -25,12 +26,12 @@ var (
 	l2Cli *ethclient.Client

 	// l2 block
-	wrappedBlock1 *bridgeTypes.WrappedBlock
-	wrappedBlock2 *bridgeTypes.WrappedBlock
+	wrappedBlock1 *types.WrappedBlock
+	wrappedBlock2 *types.WrappedBlock

 	// chunk
-	chunk1     *bridgeTypes.Chunk
-	chunk2     *bridgeTypes.Chunk
+	chunk1     *types.Chunk
+	chunk2     *types.Chunk
 	chunkHash1 common.Hash
 	chunkHash2 common.Hash
 )
@@ -45,7 +46,7 @@ func setupEnv(t *testing.T) {
 	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
 	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
-	cfg.DBConfig = &config.DBConfig{
+	cfg.DBConfig = &database.Config{
 		DSN:        base.DBConfig.DSN,
 		DriverName: base.DBConfig.DriverName,
 		MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -58,19 +59,19 @@ func setupEnv(t *testing.T) {
 	templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
 	assert.NoError(t, err)
-	wrappedBlock1 = &bridgeTypes.WrappedBlock{}
+	wrappedBlock1 = &types.WrappedBlock{}
 	err = json.Unmarshal(templateBlockTrace1, wrappedBlock1)
 	assert.NoError(t, err)
-	chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}}
+	chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
 	chunkHash1, err = chunk1.Hash(0)
 	assert.NoError(t, err)

 	templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
 	assert.NoError(t, err)
-	wrappedBlock2 = &bridgeTypes.WrappedBlock{}
+	wrappedBlock2 = &types.WrappedBlock{}
 	err = json.Unmarshal(templateBlockTrace2, wrappedBlock2)
 	assert.NoError(t, err)
-	chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}}
+	chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
 	chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
 	assert.NoError(t, err)
 }
@@ -96,7 +97,6 @@ func TestFunctions(t *testing.T) {
 	t.Run("TestCreateNewRelayer", testCreateNewRelayer)
 	t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
 	t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
-	t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
 	t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
 	t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
 	t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)

View File

@@ -343,6 +343,20 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
 		if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
 			gasTipCap = feeData.gasTipCap
 		}
+
+		// adjust for rising basefee
+		adjBaseFee := big.NewInt(0)
+		if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
+			adjBaseFee.SetUint64(feeGas)
+		}
+		adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
+		adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
+		currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee)
+		if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
+			gasFeeCap = currentGasFeeCap
+		}
+
+		// but don't exceed maxGasPrice
 		if gasFeeCap.Cmp(maxGasPrice) > 0 {
 			gasFeeCap = maxGasPrice
 		}
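In short, the resubmitted fee cap is raised to at least tip + baseFee * EscalateMultipleNum / EscalateMultipleDen and then clamped to MaxGasPrice. A small, self-contained sketch of that arithmetic; the multiplier, cap and fee values below are illustrative, not the sender's configured defaults:

package main

import (
	"fmt"
	"math/big"
)

// bumpFeeCap mirrors the adjustment above: scale the tracked base fee by
// num/den, add the tip cap, and never exceed maxGasPrice.
func bumpFeeCap(gasTipCap, gasFeeCap *big.Int, baseFee uint64, num, den, maxGasPrice *big.Int) *big.Int {
	adjBaseFee := new(big.Int).SetUint64(baseFee)
	adjBaseFee.Mul(adjBaseFee, num)
	adjBaseFee.Div(adjBaseFee, den)
	current := new(big.Int).Add(gasTipCap, adjBaseFee)
	if gasFeeCap.Cmp(current) < 0 {
		gasFeeCap = current
	}
	if gasFeeCap.Cmp(maxGasPrice) > 0 {
		gasFeeCap = maxGasPrice
	}
	return gasFeeCap
}

func main() {
	// Illustrative numbers: 10 gwei base fee escalated by 11/10, 2 gwei tip,
	// previous cap 5 gwei, hard cap 100 gwei -> new cap 13 gwei.
	fmt.Println(bumpFeeCap(big.NewInt(2e9), big.NewInt(5e9), 10_000_000_000,
		big.NewInt(11), big.NewInt(10), big.NewInt(100e9))) // 13000000000
}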

View File

@@ -65,6 +65,7 @@ func TestSender(t *testing.T) {
 	t.Run("test min gas limit", testMinGasLimit)
 	t.Run("test resubmit transaction", testResubmitTransaction)
+	t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
 	t.Run("test check pending transaction", testCheckPendingTransaction)
 	t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -154,6 +155,43 @@ func testResubmitTransaction(t *testing.T) {
 	}
 }

+func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
+	txType := "DynamicFeeTx"
+
+	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
+	cfgCopy.TxType = txType
+	s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
+	assert.NoError(t, err)
+	auth := s.auths.getAccount()
+	tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
+	s.baseFeePerGas = 1000
+	feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
+	assert.NoError(t, err)
+	// bump the basefee by 10x
+	s.baseFeePerGas *= 10
+	// resubmit and check that the gas fee has been adjusted accordingly
+	newTx, err := s.resubmitTransaction(feeData, auth, tx)
+	assert.NoError(t, err)
+
+	escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
+	escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
+	maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
+
+	adjBaseFee := new(big.Int)
+	adjBaseFee.SetUint64(s.baseFeePerGas)
+	adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
+	adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
+
+	expectedGasFeeCap := new(big.Int).Add(feeData.gasTipCap, adjBaseFee)
+	if expectedGasFeeCap.Cmp(maxGasPrice) > 0 {
+		expectedGasFeeCap = maxGasPrice
+	}
+
+	assert.Equal(t, expectedGasFeeCap.Int64(), newTx.GasFeeCap().Int64())
+	s.Stop()
+}
+
 func testCheckPendingTransaction(t *testing.T) {
 	for _, txType := range txTypes {
 		cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig

View File

@@ -8,9 +8,10 @@ import (
 	"github.com/scroll-tech/go-ethereum/log"
 	"gorm.io/gorm"

+	"scroll-tech/common/types"
+
 	"scroll-tech/bridge/internal/config"
 	"scroll-tech/bridge/internal/orm"
-	bridgeTypes "scroll-tech/bridge/internal/types"
 )

 // BatchProposer proposes batches based on available unbatched chunks.
@@ -18,15 +19,16 @@ type BatchProposer struct {
 	ctx context.Context
 	db  *gorm.DB

 	batchOrm   *orm.Batch
 	chunkOrm   *orm.Chunk
-	l2Block    *orm.L2Block
+	l2BlockOrm *orm.L2Block

 	maxChunkNumPerBatch             uint64
 	maxL1CommitGasPerBatch          uint64
-	maxL1CommitCalldataSizePerBatch uint64
+	maxL1CommitCalldataSizePerBatch uint32
 	minChunkNumPerBatch             uint64
 	batchTimeoutSec                 uint64
+	gasCostIncreaseMultiplier       float64
 }

 // NewBatchProposer creates a new BatchProposer instance.
@@ -36,12 +38,13 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
 		db:                              db,
 		batchOrm:                        orm.NewBatch(db),
 		chunkOrm:                        orm.NewChunk(db),
-		l2Block:                         orm.NewL2Block(db),
+		l2BlockOrm:                      orm.NewL2Block(db),
 		maxChunkNumPerBatch:             cfg.MaxChunkNumPerBatch,
 		maxL1CommitGasPerBatch:          cfg.MaxL1CommitGasPerBatch,
 		maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
 		minChunkNumPerBatch:             cfg.MinChunkNumPerBatch,
 		batchTimeoutSec:                 cfg.BatchTimeoutSec,
+		gasCostIncreaseMultiplier:       cfg.GasCostIncreaseMultiplier,
 	}
 }
@@ -92,18 +95,46 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
 	}
 	if len(dbChunks) == 0 {
-		log.Warn("No Unbatched Chunks")
 		return nil, nil
 	}

 	firstChunk := dbChunks[0]
 	totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
 	totalL1CommitGas := firstChunk.TotalL1CommitGas
-	var totalChunks uint64 = 1
+	totalChunks := uint64(1)
+	totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
+
+	parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	getKeccakGas := func(size uint64) uint64 {
+		return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
+	}
+
+	// Add extra gas costs
+	totalL1CommitGas += 4 * 2100                                       // 4 one-time cold sload for commitBatch
+	totalL1CommitGas += 20000                                          // 1 time sstore
+	totalL1CommitGas += 16                                             // version in calldata
+	totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
+
+	// adjusting gas:
+	// add 1 time cold sload (2100 gas) for L1MessageQueue
+	// add 1 time cold address access (2600 gas) for L1MessageQueue
+	// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
+	totalL1CommitGas += (2100 + 2600 - 100 - 100)
+	totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
+	if parentBatch != nil {
+		totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
+		totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader))          // parent batch header in calldata
+	}
+	// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
+	totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
+
 	// Check if the first chunk breaks hard limits.
 	// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
-	if totalL1CommitGas > p.maxL1CommitGasPerBatch {
+	if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
 		return nil, fmt.Errorf(
 			"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
 			firstChunk.StartBlockNumber,
@@ -124,12 +155,21 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
 	}

 	for i, chunk := range dbChunks[1:] {
-		totalChunks++
 		totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
 		totalL1CommitGas += chunk.TotalL1CommitGas
+		// adjust batch data hash gas cost
+		totalL1CommitGas -= getKeccakGas(32 * totalChunks)
+		totalChunks++
+		totalL1CommitGas += getKeccakGas(32 * totalChunks)
+		// adjust batch header hash gas cost
+		totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
+		totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
+		totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
+		totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
+		totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
 		if totalChunks > p.maxChunkNumPerBatch ||
 			totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
-			totalL1CommitGas > p.maxL1CommitGasPerBatch {
+			p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
 			return dbChunks[:i+1], nil
 		}
 	}
@@ -146,7 +186,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
 	}
 	if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
-		log.Warn("The payload size of the batch is less than the minimum limit",
+		log.Warn("The chunk number of the batch is less than the minimum limit",
 			"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
 		)
 		return nil, nil
@@ -154,16 +194,16 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
 	return dbChunks, nil
 }

-func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*bridgeTypes.Chunk, error) {
-	chunks := make([]*bridgeTypes.Chunk, len(dbChunks))
+func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
+	chunks := make([]*types.Chunk, len(dbChunks))
 	for i, c := range dbChunks {
-		wrappedBlocks, err := p.l2Block.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
+		wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
 		if err != nil {
 			log.Error("Failed to fetch wrapped blocks",
 				"start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
 			return nil, err
 		}
-		chunks[i] = &bridgeTypes.Chunk{
+		chunks[i] = &types.Chunk{
 			Blocks: wrappedBlocks,
 		}
 	}
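The estimate above leans repeatedly on a small keccak-cost helper: hashing `size` bytes costs 30 gas plus 6 gas per 32-byte word. A stand-alone sketch with worked numbers for two of the terms used above (the chunk count is made up for illustration):

package main

import "fmt"

// keccakGas mirrors the getKeccakGas helper above: 30 + 6 * ceil(size/32).
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

func main() {
	// Batch data hash: one 32-byte chunk hash per chunk, so a hypothetical
	// 10-chunk batch hashes 320 bytes: 30 + 6*10 = 90 gas.
	fmt.Println(keccakGas(32 * 10)) // 90
	// An 89-byte parent batch header (no skipped-message bitmap words,
	// per the "89 + 32 * ceil(l1MessagePopped / 256)" comment above): 30 + 6*3 = 48 gas.
	fmt.Println(keccakGas(89)) // 48
}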

View File

@@ -6,21 +6,20 @@ import (
 	"github.com/stretchr/testify/assert"

+	"scroll-tech/common/database"
 	"scroll-tech/common/types"

 	"scroll-tech/bridge/internal/config"
 	"scroll-tech/bridge/internal/orm"
-	bridgeTypes "scroll-tech/bridge/internal/types"
-	"scroll-tech/bridge/internal/utils"
 )

 // TODO: Add unit tests that the limits are enforced correctly.
 func testBatchProposer(t *testing.T) {
 	db := setupDB(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	l2BlockOrm := orm.NewL2Block(db)
-	err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
+	err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
 	assert.NoError(t, err)
 	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{

View File

@@ -8,9 +8,10 @@ import (
 	"github.com/scroll-tech/go-ethereum/log"
 	"gorm.io/gorm"

+	"scroll-tech/common/types"
+
 	"scroll-tech/bridge/internal/config"
 	"scroll-tech/bridge/internal/orm"
-	bridgeTypes "scroll-tech/bridge/internal/types"
 )

 // ChunkProposer proposes chunks based on available unchunked blocks.
@@ -27,6 +28,7 @@ type ChunkProposer struct {
 	maxL1CommitCalldataSizePerChunk uint64
 	minL1CommitCalldataSizePerChunk uint64
 	chunkTimeoutSec                 uint64
+	gasCostIncreaseMultiplier       float64
 }

 // NewChunkProposer creates a new ChunkProposer instance.
@@ -42,6 +44,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
 		maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
 		minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
 		chunkTimeoutSec:                 cfg.ChunkTimeoutSec,
+		gasCostIncreaseMultiplier:       cfg.GasCostIncreaseMultiplier,
 	}
 }
@@ -58,9 +61,8 @@ func (p *ChunkProposer) TryProposeChunk() {
 	}
 }

-func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error {
+func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
 	if chunk == nil {
-		log.Warn("proposed chunk is nil, cannot update in DB")
 		return nil
 	}
@@ -78,22 +80,22 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *bridgeTypes.Chunk) error {
 	return err
 }

-func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
+func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
 	blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
 	if err != nil {
 		return nil, err
 	}

 	if len(blocks) == 0 {
-		log.Warn("no un-chunked blocks")
 		return nil, nil
 	}

-	firstBlock := blocks[0]
+	chunk := &types.Chunk{Blocks: blocks[:1]}
+	firstBlock := chunk.Blocks[0]
 	totalTxGasUsed := firstBlock.Header.GasUsed
 	totalL2TxNum := firstBlock.L2TxsNum()
 	totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
-	totalL1CommitGas := firstBlock.EstimateL1CommitGas()
+	totalL1CommitGas := chunk.EstimateL1CommitGas()

 	// Check if the first block breaks hard limits.
 	// If so, it indicates there are bugs in sequencer, manual fix is needed.
@@ -106,7 +108,7 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
 		)
 	}

-	if totalL1CommitGas > p.maxL1CommitGasPerChunk {
+	if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
 		return nil, fmt.Errorf(
 			"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
 			firstBlock.Header.Number,
@@ -134,16 +136,17 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
 		)
 	}

-	for i, block := range blocks[1:] {
+	for _, block := range blocks[1:] {
+		chunk.Blocks = append(chunk.Blocks, block)
 		totalTxGasUsed += block.Header.GasUsed
 		totalL2TxNum += block.L2TxsNum()
 		totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
-		totalL1CommitGas += block.EstimateL1CommitGas()
+		totalL1CommitGas = chunk.EstimateL1CommitGas()
 		if totalTxGasUsed > p.maxTxGasPerChunk ||
 			totalL2TxNum > p.maxL2TxNumPerChunk ||
 			totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
-			totalL1CommitGas > p.maxL1CommitGasPerChunk {
-			blocks = blocks[:i+1]
+			p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
+			chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
 			break
 		}
 	}
@@ -166,5 +169,5 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
 		)
 		return nil, nil
 	}
-	return &bridgeTypes.Chunk{Blocks: blocks}, nil
+	return chunk, nil
 }
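The reworked loop above grows the candidate chunk one block at a time, re-estimates the commit gas for the chunk as a whole, inflates the estimate by the configured multiplier, and backs the last block out again if a cap is exceeded. A non-compilable condensation of that pattern, using only the methods visible in this diff (the other limits are elided):

// Condensed from proposeChunk above; error handling and the other limits elided.
chunk := &types.Chunk{Blocks: blocks[:1]}
totalL1CommitGas := chunk.EstimateL1CommitGas()
for _, block := range blocks[1:] {
	chunk.Blocks = append(chunk.Blocks, block)
	totalL1CommitGas = chunk.EstimateL1CommitGas() // re-estimate for the whole chunk
	if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
		chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // back the last block out
		break
	}
}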

View File

@@ -6,19 +6,20 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
// TODO: Add unit tests that the limits are enforced correctly. // TODO: Add unit tests that the limits are enforced correctly.
func testChunkProposer(t *testing.T) { func testChunkProposer(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db) l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}) err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err) assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -31,8 +32,8 @@ func testChunkProposer(t *testing.T) {
}, db) }, db)
cp.TryProposeChunk() cp.TryProposeChunk()
expectedChunk := &bridgeTypes.Chunk{ expectedChunk := &types.Chunk{
Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2}, Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
} }
expectedHash, err := expectedChunk.Hash(0) expectedHash, err := expectedChunk.Hash(0)
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -74,7 +74,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
 	}

 	l1BlockOrm := orm.NewL1Block(db)
-	savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
+	savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(ctx)
 	if err != nil {
 		log.Warn("Failed to fetch latest L1 block height from db", "err", err)
 		savedL1BlockHeight = 0
@@ -149,9 +149,10 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
 			baseFee = block.BaseFee.Uint64()
 		}
 		blocks = append(blocks, orm.L1Block{
 			Number:  uint64(height),
 			Hash:    block.Hash().String(),
 			BaseFee: baseFee,
+			GasOracleStatus: int16(types.GasOraclePending),
 		})
 	}

View File

@@ -17,6 +17,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"

+	"scroll-tech/common/database"
 	commonTypes "scroll-tech/common/types"

 	bridgeAbi "scroll-tech/bridge/abi"
@@ -36,13 +37,13 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
 func testFetchContractEvent(t *testing.T) {
 	watcher, db := setupL1Watcher(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	assert.NoError(t, watcher.FetchContractEvent())
 }

 func testL1WatcherClientFetchBlockHeader(t *testing.T) {
 	watcher, db := setupL1Watcher(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	convey.Convey("test toBlock < fromBlock", t, func() {
 		var blockHeight uint64
 		if watcher.ProcessedBlockHeight() <= 0 {
@@ -114,7 +115,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
 func testL1WatcherClientFetchContractEvent(t *testing.T) {
 	watcher, db := setupL1Watcher(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	watcher.SetConfirmations(rpc.SafeBlockNumber)

 	convey.Convey("get latest confirmed block number failure", t, func() {
@@ -259,7 +260,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
 func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
 	watcher, db := setupL1Watcher(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	logs := []types.Log{
 		{
@@ -305,7 +306,7 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
 func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
 	watcher, db := setupL1Watcher(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	logs := []types.Log{
 		{
 			Topics: []common.Hash{bridgeAbi.L1CommitBatchEventSignature},
@@ -347,7 +348,7 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
 func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
 	watcher, db := setupL1Watcher(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	logs := []types.Log{
 		{
 			Topics: []common.Hash{bridgeAbi.L1FinalizeBatchEventSignature},

View File

@@ -22,7 +22,6 @@ import (
bridgeAbi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils" "scroll-tech/bridge/internal/utils"
) )
@@ -110,7 +109,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
} }
// Fetch and store block traces for missing blocks // Fetch and store block traces for missing blocks
for from := uint64(heightInDB) + 1; from <= blockHeight; from += blockTracesFetchLimit { for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1 to := from + blockTracesFetchLimit - 1
if to > blockHeight { if to > blockHeight {
@@ -160,7 +159,7 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
} }
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error { func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*bridgeTypes.WrappedBlock var blocks []*types.WrappedBlock
for number := from; number <= to; number++ { for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number) log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number))) block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
@@ -170,15 +169,15 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String()) log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String())
withdrawTrieRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number))) withdrawRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
if err3 != nil { if err3 != nil {
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number) return fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
} }
blocks = append(blocks, &bridgeTypes.WrappedBlock{ blocks = append(blocks, &types.WrappedBlock{
Header: block.Header(), Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()), Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot), WithdrawRoot: common.BytesToHash(withdrawRoot),
}) })
} }
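WithdrawTrieRoot is renamed to WithdrawRoot throughout this change; the value itself is still read straight from the message queue contract's storage slot. A condensed sketch of that read, using only the watcher fields and go-ethereum calls visible in this hunk:

raw, err := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
if err != nil {
    return fmt.Errorf("failed to get withdrawRoot: %w, number: %v", err, number)
}
blocks = append(blocks, &types.WrappedBlock{
    Header:       block.Header(),
    Transactions: txsToTxsData(block.Transactions()),
    WithdrawRoot: common.BytesToHash(raw), // raw 32-byte slot value -> common.Hash
})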

View File

@@ -21,6 +21,7 @@ import (
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
cutils "scroll-tech/common/utils" cutils "scroll-tech/common/utils"
bridgeAbi "scroll-tech/bridge/abi" bridgeAbi "scroll-tech/bridge/abi"
@@ -42,7 +43,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
subCtx, cancel := context.WithCancel(context.Background()) subCtx, cancel := context.WithCancel(context.Background())
defer func() { defer func() {
cancel() cancel()
defer utils.CloseDB(db) defer database.CloseDB(db)
}() }()
loopToFetchEvent(subCtx, wc) loopToFetchEvent(subCtx, wc)
@@ -68,7 +69,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
func testFetchRunningMissingBlocks(t *testing.T) { func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t) _, db := setupL2Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0]) auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
@@ -87,7 +88,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
wc := prepareWatcherClient(l2Cli, db, address) wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(latestHeight) wc.TryFetchRunningMissingBlocks(latestHeight)
fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background()) fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
return err == nil && uint64(fetchedHeight) == latestHeight return err == nil && fetchedHeight == latestHeight
}) })
assert.True(t, ok) assert.True(t, ok)
} }
@@ -114,7 +115,7 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
logs := []gethTypes.Log{ logs := []gethTypes.Log{
{ {
@@ -154,7 +155,7 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) { func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t) watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
logs := []gethTypes.Log{ logs := []gethTypes.Log{
{ {

View File

@@ -9,12 +9,13 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/config" "scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -27,8 +28,8 @@ var (
l2Cli *ethclient.Client l2Cli *ethclient.Client
// block trace // block trace
wrappedBlock1 *bridgeTypes.WrappedBlock wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock wrappedBlock2 *types.WrappedBlock
) )
func setupEnv(t *testing.T) (err error) { func setupEnv(t *testing.T) (err error) {
@@ -40,7 +41,7 @@ func setupEnv(t *testing.T) (err error) {
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &config.DBConfig{ cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN, DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName, DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -56,7 +57,7 @@ func setupEnv(t *testing.T) (err error) {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock1 = &bridgeTypes.WrappedBlock{} wrappedBlock1 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil { if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err return err
} }
@@ -66,7 +67,7 @@ func setupEnv(t *testing.T) (err error) {
return err return err
} }
// unmarshal blockTrace // unmarshal blockTrace
wrappedBlock2 = &bridgeTypes.WrappedBlock{} wrappedBlock2 = &types.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil { if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err return err
} }
@@ -74,7 +75,7 @@ func setupEnv(t *testing.T) (err error) {
} }
func setupDB(t *testing.T) *gorm.DB { func setupDB(t *testing.T) *gorm.DB {
db, err := utils.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)
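The tests now go through the shared scroll-tech/common/database helpers instead of the bridge-local utils package. A minimal sketch of that flow with a placeholder DSN, using only the Config fields that appear in this diff:

dbCfg := &database.Config{
    DSN:        "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable", // placeholder
    DriverName: "postgres",
    MaxOpenNum: 200,
}
db, err := database.InitDB(dbCfg)
if err != nil {
    t.Fatalf("failed to init db: %v", err)
}
defer database.CloseDB(db)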

View File

@@ -10,8 +10,6 @@ import (
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
bridgeTypes "scroll-tech/bridge/internal/types"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
@@ -32,14 +30,16 @@ type Batch struct {
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"` EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"` StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"` WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"` BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof // proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProofTimeSec int `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// rollup // rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"` RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
@@ -72,6 +72,7 @@ func (*Batch) TableName() string {
// The returned batches are sorted in ascending order by their index. // The returned batches are sorted in ascending order by their index.
func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) { func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) {
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
@@ -89,44 +90,54 @@ func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, o
var batches []*Batch var batches []*Batch
if err := db.Find(&batches).Error; err != nil { if err := db.Find(&batches).Error; err != nil {
return nil, err return nil, fmt.Errorf("Batch.GetBatches error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
} }
return batches, nil return batches, nil
} }
// GetBatchCount retrieves the total number of batches in the database. // GetBatchCount retrieves the total number of batches in the database.
func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) { func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
var count int64 var count int64
err := o.db.WithContext(ctx).Model(&Batch{}).Count(&count).Error if err := db.Count(&count).Error; err != nil {
if err != nil { return 0, fmt.Errorf("Batch.GetBatchCount error: %w", err)
return 0, err
} }
return uint64(count), nil return uint64(count), nil
} }
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash. // GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) { func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.BatchProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)
var batch Batch var batch Batch
err := o.db.WithContext(ctx).Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified).First(&batch).Error if err := db.Find(&batch).Error; err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
return nil, err
} }
var proof message.AggProof var proof message.BatchProof
err = json.Unmarshal(batch.Proof, &proof) if err := json.Unmarshal(batch.Proof, &proof); err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
return nil, err
} }
return &proof, nil return &proof, nil
} }
// GetLatestBatch retrieves the latest batch from the database. // GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) { func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Order("index desc")
var latestBatch Batch var latestBatch Batch
err := o.db.WithContext(ctx).Order("index desc").First(&latestBatch).Error if err := db.First(&latestBatch).Error; err != nil {
if err != nil { if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, err return nil, nil
}
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
} }
return &latestBatch, nil return &latestBatch, nil
} }
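GetLatestBatch now maps gorm.ErrRecordNotFound to (nil, nil), so callers branch on the nil batch rather than on the error; a minimal caller-side sketch (batchOrm is assumed to be an *orm.Batch):

latest, err := batchOrm.GetLatestBatch(ctx)
if err != nil {
    return err // a real database failure
}
if latest == nil {
    log.Info("no batch in database yet")
    return nil
}
log.Info("latest batch", "index", latest.Index, "hash", latest.Hash)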
@@ -134,13 +145,17 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes. // GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) { func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 { if len(hashes) == 0 {
return []types.RollupStatus{}, nil return nil, nil
} }
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("hash, rollup_status")
db = db.Where("hash IN ?", hashes)
var batches []Batch var batches []Batch
err := o.db.WithContext(ctx).Where("hash IN ?", hashes).Find(&batches).Error if err := db.Find(&batches).Error; err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetRollupStatusByHashList error: %w, hashes: %v", err, hashes)
return nil, err
} }
hashToStatusMap := make(map[string]types.RollupStatus) hashToStatusMap := make(map[string]types.RollupStatus)
@@ -152,7 +167,7 @@ func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string)
for _, hash := range hashes { for _, hash := range hashes {
status, ok := hashToStatusMap[hash] status, ok := hashToStatusMap[hash]
if !ok { if !ok {
return nil, fmt.Errorf("hash not found in database: %s", hash) return nil, fmt.Errorf("Batch.GetRollupStatusByHashList: hash not found in database: %s", hash)
} }
statuses = append(statuses, status) statuses = append(statuses, status)
} }
@@ -167,40 +182,40 @@ func (o *Batch) GetPendingBatches(ctx context.Context, limit int) ([]*Batch, err
return nil, errors.New("limit must be greater than zero") return nil, errors.New("limit must be greater than zero")
} }
var batches []*Batch
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ?", types.RollupPending)
db = db.Order("index ASC")
db = db.Limit(limit)
db = db.Where("rollup_status = ?", types.RollupPending).Order("index ASC").Limit(limit) var batches []*Batch
if err := db.Find(&batches).Error; err != nil { if err := db.Find(&batches).Error; err != nil {
return nil, err return nil, fmt.Errorf("Batch.GetPendingBatches error: %w", err)
} }
return batches, nil return batches, nil
} }
// GetBatchByIndex retrieves the batch by the given index. // GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) { func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
var batch Batch var batch Batch
err := o.db.WithContext(ctx).Where("index = ?", index).First(&batch).Error if err := db.First(&batch).Error; err != nil {
if err != nil { return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
return nil, err
} }
return &batch, nil return &batch, nil
} }
// InsertBatch inserts a new batch into the database. // InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Batch, error) { func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 { if len(chunks) == 0 {
return nil, errors.New("invalid args") return nil, errors.New("invalid args")
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
parentBatch, err := o.GetLatestBatch(ctx) parentBatch, err := o.GetLatestBatch(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { if err != nil {
log.Error("failed to get the latest batch", "err", err) log.Error("failed to get the latest batch", "err", err)
return nil, err return nil, err
} }
@@ -217,8 +232,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
batchIndex = parentBatch.Index + 1 batchIndex = parentBatch.Index + 1
parentBatchHash = common.HexToHash(parentBatch.Hash) parentBatchHash = common.HexToHash(parentBatch.Hash)
var parentBatchHeader *bridgeTypes.BatchHeader var parentBatchHeader *types.BatchHeader
parentBatchHeader, err = bridgeTypes.DecodeBatchHeader(parentBatch.BatchHeader) parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
if err != nil { if err != nil {
log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err) log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
return nil, err return nil, err
@@ -228,7 +243,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
version = parentBatchHeader.Version() version = parentBatchHeader.Version()
} }
batchHeader, err := bridgeTypes.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks) batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
if err != nil { if err != nil {
log.Error("failed to create batch header", log.Error("failed to create batch header",
"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore, "index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
@@ -240,59 +255,54 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
lastChunkBlockNum := len(chunks[numChunks-1].Blocks) lastChunkBlockNum := len(chunks[numChunks-1].Blocks)
newBatch := Batch{ newBatch := Batch{
Index: batchIndex, Index: batchIndex,
Hash: batchHeader.Hash().Hex(), Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash, StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex, StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash, EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex, EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(), StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(), WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
BatchHeader: batchHeader.Encode(), ParentBatchHash: parentBatchHash.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned), BatchHeader: batchHeader.Encode(),
RollupStatus: int16(types.RollupPending), ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
} }
if err := db.WithContext(ctx).Create(&newBatch).Error; err != nil { db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db.WithContext(ctx)
db = db.Model(&Batch{})
if err := db.Create(&newBatch).Error; err != nil {
log.Error("failed to insert batch", "batch", newBatch, "err", err) log.Error("failed to insert batch", "batch", newBatch, "err", err)
return nil, err return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
} }
return &newBatch, nil return &newBatch, nil
} }
// UpdateSkippedBatches updates the skipped batches in the database.
func (o *Batch) UpdateSkippedBatches(ctx context.Context) (uint64, error) {
provingStatusList := []interface{}{
int(types.ProvingTaskSkipped),
int(types.ProvingTaskFailed),
}
result := o.db.Model(&Batch{}).Where("rollup_status", int(types.RollupCommitted)).
Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped))
if result.Error != nil {
return 0, result.Error
}
return uint64(result.RowsAffected), nil
}
// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch. // UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error { func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["oracle_status"] = int(status) updateFields["oracle_status"] = int(status)
updateFields["oracle_tx_hash"] = txHash updateFields["oracle_tx_hash"] = txHash
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
} }
return nil return nil
} }
// UpdateProvingStatus updates the proving status of a batch. // UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status) updateFields["proving_status"] = int(status)
@@ -303,22 +313,24 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = nil updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified: case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now() updateFields["proved_at"] = time.Now()
default:
} }
if err := db.Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil { db := o.db
return err if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
// UpdateRollupStatus updates the rollup status of a batch. // UpdateRollupStatus updates the rollup status of a batch.
func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error { func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["rollup_status"] = int(status) updateFields["rollup_status"] = int(status)
@@ -328,8 +340,17 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
case types.RollupFinalized: case types.RollupFinalized:
updateFields["finalized_at"] = time.Now() updateFields["finalized_at"] = time.Now()
} }
if err := db.Model(&Batch{}).WithContext(ctx).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateRollupStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
@@ -342,8 +363,13 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
if status == types.RollupCommitted { if status == types.RollupCommitted {
updateFields["committed_at"] = time.Now() updateFields["committed_at"] = time.Now()
} }
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), commitTxHash)
} }
return nil return nil
} }
@@ -356,23 +382,35 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
if status == types.RollupFinalized { if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now() updateFields["finalized_at"] = time.Now()
} }
if err := o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
return err db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash)
} }
return nil return nil
} }
// UpdateProofByHash updates the block batch proof by hash. // UpdateProofByHash updates the batch proof by hash.
// for unit test. // for unit test.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error { func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.BatchProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof) proofBytes, err := json.Marshal(proof)
if err != nil { if err != nil {
return err return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
} }
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec updateFields["proof_time_sec"] = proofTimeSec
err = o.db.WithContext(ctx).Model(&Batch{}).Where("hash", hash).Updates(updateFields).Error
return err db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err = db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
return nil
} }
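Several update helpers above accept a trailing dbTX ...*gorm.DB so a caller can group related writes; a minimal sketch using gorm's standard Transaction helper (batchHash and the chosen statuses are placeholders):

err := db.Transaction(func(tx *gorm.DB) error {
    if err := batchOrm.UpdateProvingStatus(ctx, batchHash, types.ProvingTaskVerified, tx); err != nil {
        return err
    }
    return batchOrm.UpdateRollupStatus(ctx, batchHash, types.RollupFinalized, tx)
})
if err != nil {
    log.Error("failed to update batch statuses atomically", "err", err)
}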

View File

@@ -3,12 +3,11 @@ package orm
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"time" "time"
"scroll-tech/common/types" "scroll-tech/common/types"
bridgeTypes "scroll-tech/bridge/internal/types"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
) )
@@ -26,22 +25,26 @@ type Chunk struct {
EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"` EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"`
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"` StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"` TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"` TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
ParentChunkHash string `json:"parent_chunk_hash" gorm:"column:parent_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
ParentChunkStateRoot string `json:"parent_chunk_state_root" gorm:"column:parent_chunk_state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
// proof // proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int16 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// batch // batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"` BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// metadata // metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"` TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"` TotalL2TxNum uint32 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"` TotalL1CommitCalldataSize uint32 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"` TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -63,19 +66,22 @@ func (*Chunk) TableName() string {
// The returned chunks are sorted in ascending order by their index. // The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) { func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) {
if startIndex > endIndex { if startIndex > endIndex {
return nil, errors.New("start index should be less than or equal to end index") return nil, fmt.Errorf("Chunk.GetChunksInRange: start index should be less than or equal to end index, start index: %v, end index: %v", startIndex, endIndex)
} }
var chunks []*Chunk db := o.db.WithContext(ctx)
db := o.db.WithContext(ctx).Where("index >= ? AND index <= ?", startIndex, endIndex) db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
db = db.Order("index ASC") db = db.Order("index ASC")
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil { if err := db.Find(&chunks).Error; err != nil {
return nil, err return nil, fmt.Errorf("Chunk.GetChunksInRange error: %w, start index: %v, end index: %v", err, startIndex, endIndex)
} }
if startIndex+uint64(len(chunks)) != endIndex+1 { // sanity check
return nil, errors.New("number of chunks not expected in the specified range") if uint64(len(chunks)) != endIndex-startIndex+1 {
return nil, fmt.Errorf("Chunk.GetChunksInRange: incorrect number of chunks, expected: %v, got: %v, start index: %v, end index: %v", endIndex-startIndex+1, len(chunks), startIndex, endIndex)
} }
return chunks, nil return chunks, nil
@@ -83,46 +89,45 @@ func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endInde
// GetUnbatchedChunks retrieves unbatched chunks from the database. // GetUnbatchedChunks retrieves unbatched chunks from the database.
func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) { func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash IS NULL")
db = db.Order("index asc")
var chunks []*Chunk var chunks []*Chunk
err := o.db.WithContext(ctx). if err := db.Find(&chunks).Error; err != nil {
Where("batch_hash IS NULL"). return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
Order("index asc").
Find(&chunks).Error
if err != nil {
return nil, err
} }
return chunks, nil return chunks, nil
} }
// GetLatestChunk retrieves the latest chunk from the database. // GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) { func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Order("index desc")
var latestChunk Chunk var latestChunk Chunk
err := o.db.WithContext(ctx). if err := db.First(&latestChunk).Error; err != nil {
Order("index desc"). return nil, fmt.Errorf("Chunk.GetLatestChunk error: %w", err)
First(&latestChunk).Error
if err != nil {
return nil, err
} }
return &latestChunk, nil return &latestChunk, nil
} }
// InsertChunk inserts a new chunk into the database. // InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX ...*gorm.DB) (*Chunk, error) { func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 { if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args") return nil, errors.New("invalid args")
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
var chunkIndex uint64 var chunkIndex uint64
var totalL1MessagePoppedBefore uint64 var totalL1MessagePoppedBefore uint64
var parentChunkHash string
var parentChunkStateRoot string
parentChunk, err := o.GetLatestChunk(ctx) parentChunk, err := o.GetLatestChunk(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get latest chunk", "err", err) log.Error("failed to get latest chunk", "err", err)
return nil, err return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
} }
// if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's // if parentChunk==nil then err==gorm.ErrRecordNotFound, which means there's
@@ -130,24 +135,24 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX
// if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk // if parentChunk!=nil then err=nil, then we fill the parentChunk-related data into the creating chunk
if parentChunk != nil { if parentChunk != nil {
chunkIndex = parentChunk.Index + 1 chunkIndex = parentChunk.Index + 1
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + parentChunk.TotalL1MessagesPoppedInChunk totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
parentChunkHash = parentChunk.Hash
parentChunkStateRoot = parentChunk.StateRoot
} }
hash, err := chunk.Hash(totalL1MessagePoppedBefore) hash, err := chunk.Hash(totalL1MessagePoppedBefore)
if err != nil { if err != nil {
log.Error("failed to get chunk hash", "err", err) log.Error("failed to get chunk hash", "err", err)
return nil, err return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
} }
var totalL2TxGas uint64 var totalL2TxGas uint64
var totalL2TxNum uint64 var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64 var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
for _, block := range chunk.Blocks { for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed totalL2TxGas += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum() totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize() totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas += block.EstimateL1CommitGas()
} }
numBlocks := len(chunk.Blocks) numBlocks := len(chunk.Blocks)
@@ -159,18 +164,28 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(), EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(), EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: totalL2TxGas, TotalL2TxGas: totalL2TxGas,
TotalL2TxNum: totalL2TxNum, TotalL2TxNum: uint32(totalL2TxNum),
TotalL1CommitCalldataSize: totalL1CommitCalldataSize, TotalL1CommitCalldataSize: uint32(totalL1CommitCalldataSize),
TotalL1CommitGas: totalL1CommitGas, TotalL1CommitGas: chunk.EstimateL1CommitGas(),
StartBlockTime: chunk.Blocks[0].Header.Time, StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore, TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore), TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
ParentChunkHash: parentChunkHash,
StateRoot: chunk.Blocks[numBlocks-1].Header.Root.Hex(),
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned), ProvingStatus: int16(types.ProvingTaskUnassigned),
} }
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(&newChunk).Error; err != nil { if err := db.Create(&newChunk).Error; err != nil {
log.Error("failed to insert chunk", "hash", hash, "err", err) return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash)
return nil, err
} }
return &newChunk, nil return &newChunk, nil
@@ -178,11 +193,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *bridgeTypes.Chunk, dbTX
// UpdateProvingStatus updates the proving status of a chunk. // UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status) updateFields["proving_status"] = int(status)
@@ -193,11 +203,18 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = nil updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified: case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now() updateFields["proved_at"] = time.Now()
default:
} }
if err := db.Model(&Chunk{}).Where("hash", hash).Updates(updateFields).Error; err != nil { db := o.db
return err if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatus error: %w, chunk hash: %v, status: %v", err, hash, status.String())
} }
return nil return nil
} }
@@ -209,10 +226,12 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
if len(dbTX) > 0 && dbTX[0] != nil { if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0] db = dbTX[0]
} }
db = db.Model(&Chunk{}).Where("index >= ? AND index <= ?", startIndex, endIndex) db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
if err := db.Update("batch_hash", batchHash).Error; err != nil { if err := db.Update("batch_hash", batchHash).Error; err != nil {
return err return fmt.Errorf("Chunk.UpdateBatchHashInRange error: %w, start index: %v, end index: %v, batch hash: %v", err, startIndex, endIndex, batchHash)
} }
return nil return nil
} }
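Because these chunk methods wrap the underlying gorm error with %w, the empty-table case stays detectable through the wrapper (errors.Is walks the chain, so the explicit Unwrap used by InsertChunk above is one of two equivalent checks); a minimal sketch mirroring that call site:

var chunkIndex uint64
parentChunk, err := chunkOrm.GetLatestChunk(ctx)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { // errors.Is sees through fmt.Errorf("%w", ...)
    return fmt.Errorf("unexpected database error: %w", err)
}
if parentChunk != nil {
    chunkIndex = parentChunk.Index + 1
}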

View File

@@ -2,8 +2,9 @@ package orm
import ( import (
"context" "context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
@@ -13,14 +14,19 @@ import (
type L1Block struct { type L1Block struct {
db *gorm.DB `gorm:"column:-"` db *gorm.DB `gorm:"column:-"`
Number uint64 `json:"number" gorm:"column:number"` // block
Hash string `json:"hash" gorm:"column:hash"` Number uint64 `json:"number" gorm:"column:number"`
HeaderRLP string `json:"header_rlp" gorm:"column:header_rlp"` Hash string `json:"hash" gorm:"column:hash"`
BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"` BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
BlockStatus int `json:"block_status" gorm:"column:block_status;default:1"`
ImportTxHash string `json:"import_tx_hash" gorm:"column:import_tx_hash;default:NULL"` // oracle
GasOracleStatus int `json:"oracle_status" gorm:"column:oracle_status;default:1"` GasOracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"` OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL1Block create an l1Block instance // NewL1Block create an l1Block instance
@@ -34,54 +40,64 @@ func (*L1Block) TableName() string {
} }
// GetLatestL1BlockHeight get the latest l1 block height // GetLatestL1BlockHeight get the latest l1 block height
func (l *L1Block) GetLatestL1BlockHeight() (uint64, error) { func (o *L1Block) GetLatestL1BlockHeight(ctx context.Context) (uint64, error) {
result := l.db.Model(&L1Block{}).Select("COALESCE(MAX(number), 0)").Row() db := o.db.WithContext(ctx)
if result.Err() != nil { db = db.Model(&L1Block{})
return 0, result.Err() db = db.Select("COALESCE(MAX(number), 0)")
}
var maxNumber uint64 var maxNumber uint64
if err := result.Scan(&maxNumber); err != nil { if err := db.Row().Scan(&maxNumber); err != nil {
return 0, err return 0, fmt.Errorf("L1Block.GetLatestL1BlockHeight error: %w", err)
} }
return maxNumber, nil return maxNumber, nil
} }
// GetL1Blocks get the l1 blocks // GetL1Blocks get the l1 blocks
func (l *L1Block) GetL1Blocks(fields map[string]interface{}) ([]L1Block, error) { func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}) ([]L1Block, error) {
var l1Blocks []L1Block db := o.db.WithContext(ctx)
db := l.db db = db.Model(&L1Block{})
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
} }
db = db.Order("number ASC") db = db.Order("number ASC")
var l1Blocks []L1Block
if err := db.Find(&l1Blocks).Error; err != nil { if err := db.Find(&l1Blocks).Error; err != nil {
return nil, err return nil, fmt.Errorf("L1Block.GetL1Blocks error: %w, fields: %v", err, fields)
} }
return l1Blocks, nil return l1Blocks, nil
} }
// InsertL1Blocks batch insert l1 blocks // InsertL1Blocks batch insert l1 blocks
func (l *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error { func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
if len(blocks) == 0 { if len(blocks) == 0 {
return nil return nil
} }
err := l.db.WithContext(ctx).Create(&blocks).Error db := o.db.WithContext(ctx)
if err != nil { db = db.Model(&L1Block{})
log.Error("failed to insert L1 Blocks", "err", err)
if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
} }
return err return nil
} }
// UpdateL1GasOracleStatusAndOracleTxHash update l1 gas oracle status and oracle tx hash // UpdateL1GasOracleStatusAndOracleTxHash update l1 gas oracle status and oracle tx hash
func (l *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error { func (o *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
updateFields := map[string]interface{}{ updateFields := map[string]interface{}{
"oracle_status": int(status), "oracle_status": int(status),
"oracle_tx_hash": txHash, "oracle_tx_hash": txHash,
} }
if err := l.db.WithContext(ctx).Model(&L1Block{}).Where("hash", blockHash).Updates(updateFields).Error; err != nil {
return err db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
db = db.Where("hash", blockHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("L1Block.UpdateL1GasOracleStatusAndOracleTxHash error: %w, block hash: %v, status: %v, tx hash: %v", err, blockHash, status.String(), txHash)
} }
return nil return nil
} }
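The COALESCE(MAX(number), 0) projection is what lets GetLatestL1BlockHeight work on an empty table: MAX over zero rows yields NULL, COALESCE maps it to 0, and the scan returns the genesis height instead of an error. Condensed, the query amounts to:

var maxNumber uint64
if err := db.WithContext(ctx).
    Model(&orm.L1Block{}).
    Select("COALESCE(MAX(number), 0)").
    Row().
    Scan(&maxNumber); err != nil {
    return 0, err
}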

View File

@@ -3,6 +3,7 @@ package orm
import ( import (
"context" "context"
"database/sql" "database/sql"
"time"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
@@ -25,6 +26,11 @@ type L1Message struct {
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"` Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"` Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"`
Status int `json:"status" gorm:"column:status;default:1"` Status int `json:"status" gorm:"column:status;default:1"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL1Message create an L1MessageOrm instance // NewL1Message create an L1MessageOrm instance
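The new metadata columns follow gorm conventions: CreatedAt and UpdatedAt are maintained automatically, and a gorm.DeletedAt column turns Delete into a soft delete that default queries skip. A small sketch (the status filter is a placeholder):

// Soft delete: sets deleted_at rather than removing the row.
db.Where("status = ?", status).Delete(&orm.L1Message{})

// Default queries exclude soft-deleted rows; Unscoped opts back in.
var msgs []orm.L1Message
db.Unscoped().Where("status = ?", status).Find(&msgs)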

View File

@@ -3,31 +3,40 @@ package orm
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"time"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/bridge/internal/types" "scroll-tech/common/types"
) )
// L2Block represents a l2 block in the database. // L2Block represents a l2 block in the database.
type L2Block struct { type L2Block struct {
db *gorm.DB `gorm:"column:-"` db *gorm.DB `gorm:"column:-"`
Number uint64 `json:"number" gorm:"number"` // block
Hash string `json:"hash" gorm:"hash"` Number uint64 `json:"number" gorm:"number"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"` Hash string `json:"hash" gorm:"hash"`
Header string `json:"header" gorm:"header"` ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Transactions string `json:"transactions" gorm:"transactions"` Header string `json:"header" gorm:"header"`
WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"` Transactions string `json:"transactions" gorm:"transactions"`
TxNum uint64 `json:"tx_num" gorm:"tx_num"` WithdrawRoot string `json:"withdraw_root" gorm:"withdraw_root"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"` StateRoot string `json:"state_root" gorm:"state_root"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"` TxNum uint32 `json:"tx_num" gorm:"tx_num"`
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"` GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
} }
// NewL2Block creates a new L2Block instance // NewL2Block creates a new L2Block instance
@@ -42,25 +51,30 @@ func (*L2Block) TableName() string {
// GetL2BlocksLatestHeight retrieves the height of the latest L2 block. // GetL2BlocksLatestHeight retrieves the height of the latest L2 block.
// If the l2_block table is empty, it returns 0 to represent the genesis block height. // If the l2_block table is empty, it returns 0 to represent the genesis block height.
// In case of an error, it returns -1 along with the error. func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (int64, error) { db := o.db.WithContext(ctx)
var maxNumber int64 db = db.Model(&L2Block{})
if err := o.db.WithContext(ctx).Model(&L2Block{}).Select("COALESCE(MAX(number), 0)").Row().Scan(&maxNumber); err != nil { db = db.Select("COALESCE(MAX(number), 0)")
return -1, err
}
var maxNumber uint64
if err := db.Row().Scan(&maxNumber); err != nil {
return 0, fmt.Errorf("L2Block.GetL2BlocksLatestHeight error: %w", err)
}
return maxNumber, nil return maxNumber, nil
} }
// GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk. // GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk.
// The returned blocks are sorted in ascending order by their block number. // The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) { func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root")
db = db.Where("chunk_hash IS NULL")
db = db.Order("number ASC")
var l2Blocks []L2Block var l2Blocks []L2Block
if err := o.db.WithContext(ctx).Select("header, transactions, withdraw_trie_root"). if err := db.Find(&l2Blocks).Error; err != nil {
Where("chunk_hash IS NULL"). return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
Order("number asc").
Find(&l2Blocks).Error; err != nil {
return nil, err
} }
var wrappedBlocks []*types.WrappedBlock var wrappedBlocks []*types.WrappedBlock
@@ -68,15 +82,15 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
var wrappedBlock types.WrappedBlock var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil { if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
} }
wrappedBlock.Header = &gethTypes.Header{} wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil { if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
} }
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot) wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
wrappedBlocks = append(wrappedBlocks, &wrappedBlock) wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
} }
@@ -87,6 +101,7 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
// The returned L2Blocks are sorted in ascending order by their block number. // The returned L2Blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) { func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) {
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
for key, value := range fields { for key, value := range fields {
db = db.Where(key, value) db = db.Where(key, value)
@@ -104,7 +119,7 @@ func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}
var l2Blocks []*L2Block var l2Blocks []*L2Block
if err := db.Find(&l2Blocks).Error; err != nil { if err := db.Find(&l2Blocks).Error; err != nil {
return nil, err return nil, fmt.Errorf("L2Block.GetL2Blocks error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
} }
return l2Blocks, nil return l2Blocks, nil
} }
@@ -114,20 +129,23 @@ func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}
 // The returned blocks are sorted in ascending order by their block number.
 func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) {
 	if startBlockNumber > endBlockNumber {
-		return nil, errors.New("start block number should be less than or equal to end block number")
+		return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber)
 	}
+	var l2Blocks []L2Block
 	db := o.db.WithContext(ctx)
+	db = db.Model(&L2Block{})
+	db = db.Select("header, transactions, withdraw_root")
 	db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
 	db = db.Order("number ASC")
-	var l2Blocks []L2Block
 	if err := db.Find(&l2Blocks).Error; err != nil {
-		return nil, err
+		return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
 	}
+	// sanity check
 	if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
-		return nil, errors.New("number of blocks not expected in the specified range")
+		return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks))
 	}
 	var wrappedBlocks []*types.WrappedBlock
@@ -135,15 +153,15 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
 		var wrappedBlock types.WrappedBlock
 		if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
-			return nil, err
+			return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
 		}
 		wrappedBlock.Header = &gethTypes.Header{}
 		if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
-			return nil, err
+			return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
 		}
-		wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
+		wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
 		wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
 	}
@@ -157,32 +175,35 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
 		header, err := json.Marshal(block.Header)
 		if err != nil {
 			log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
-			return err
+			return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
 		}
 		txs, err := json.Marshal(block.Transactions)
 		if err != nil {
 			log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
-			return err
+			return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
 		}
 		l2Block := L2Block{
 			Number:       block.Header.Number.Uint64(),
 			Hash:         block.Header.Hash().String(),
 			ParentHash:   block.Header.ParentHash.String(),
 			Transactions: string(txs),
-			WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
-			TxNum:            uint64(len(block.Transactions)),
-			GasUsed:          block.Header.GasUsed,
-			BlockTimestamp:   block.Header.Time,
-			Header:           string(header),
+			WithdrawRoot:   block.WithdrawRoot.Hex(),
+			StateRoot:      block.Header.Root.Hex(),
+			TxNum:          uint32(len(block.Transactions)),
+			GasUsed:        block.Header.GasUsed,
+			BlockTimestamp: block.Header.Time,
+			Header:         string(header),
 		}
 		l2Blocks = append(l2Blocks, l2Block)
 	}
-	if err := o.db.WithContext(ctx).Create(&l2Blocks).Error; err != nil {
-		log.Error("failed to insert l2Blocks", "err", err)
-		return err
+	db := o.db.WithContext(ctx)
+	db = db.Model(&L2Block{})
+	if err := db.Create(&l2Blocks).Error; err != nil {
+		return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
 	}
 	return nil
 }
@@ -196,13 +217,19 @@ func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startIndex uint64,
 	if len(dbTX) > 0 && dbTX[0] != nil {
 		db = dbTX[0]
 	}
-	db = db.WithContext(ctx).Model(&L2Block{}).Where("number >= ? AND number <= ?", startIndex, endIndex)
+	db = db.WithContext(ctx)
+	db = db.Model(&L2Block{})
+	db = db.Where("number >= ? AND number <= ?", startIndex, endIndex)
 	tx := db.Update("chunk_hash", chunkHash)
-	if tx.RowsAffected != int64(endIndex-startIndex+1) {
-		return fmt.Errorf("expected %d rows to be updated, got %d", endIndex-startIndex+1, tx.RowsAffected)
+	if tx.Error != nil {
+		return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start index: %v, end index: %v, chunk hash: %v", tx.Error, startIndex, endIndex, chunkHash)
 	}
-	return tx.Error
+	// sanity check
+	if uint64(tx.RowsAffected) != endIndex-startIndex+1 {
+		return fmt.Errorf("L2Block.UpdateChunkHashInRange: incorrect number of rows affected, expected: %v, got: %v", endIndex-startIndex+1, tx.RowsAffected)
+	}
+	return nil
 }
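The updated UpdateChunkHashInRange above wraps the gorm error with fmt.Errorf and %w and then checks RowsAffected against the inclusive range size. A minimal, self-contained sketch of that pattern follows; the row model and the glebarez/sqlite in-memory driver are assumptions for demonstration only, not part of this repository.

package main

import (
	"fmt"

	"github.com/glebarez/sqlite" // pure-Go SQLite driver, assumed for the demo
	"gorm.io/gorm"
)

// row stands in for the L2Block model; only the two columns used below matter.
type row struct {
	Number    uint64 `gorm:"primaryKey"`
	ChunkHash string
}

// updateChunkHashInRange mirrors the pattern from the diff above: wrap the gorm
// error with %w for context, then verify RowsAffected equals the inclusive
// range size (endIndex - startIndex + 1) as a sanity check.
func updateChunkHashInRange(db *gorm.DB, startIndex, endIndex uint64, chunkHash string) error {
	tx := db.Model(&row{}).
		Where("number >= ? AND number <= ?", startIndex, endIndex).
		Update("chunk_hash", chunkHash)
	if tx.Error != nil {
		return fmt.Errorf("update chunk hash error: %w, start index: %v, end index: %v", tx.Error, startIndex, endIndex)
	}
	if uint64(tx.RowsAffected) != endIndex-startIndex+1 {
		return fmt.Errorf("incorrect number of rows affected, expected: %v, got: %v", endIndex-startIndex+1, tx.RowsAffected)
	}
	return nil
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&row{}); err != nil {
		panic(err)
	}
	db.Create(&[]row{{Number: 1}, {Number: 2}, {Number: 3}})

	fmt.Println(updateChunkHashInRange(db, 1, 3, "0xabc")) // <nil>
	fmt.Println(updateChunkHashInRange(db, 1, 5, "0xdef")) // error: expected 5 rows, got 3
}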


@@ -1,61 +0,0 @@
package migrate
import (
"database/sql"
"embed"
"os"
"strconv"
"github.com/pressly/goose/v3"
)
//go:embed migrations/*.sql
var embedMigrations embed.FS
// MigrationsDir migration dir
const MigrationsDir string = "migrations"
func init() {
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("scroll_migrations")
verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
goose.SetVerbose(verbose)
}
// Migrate migrate db
func Migrate(db *sql.DB) error {
return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}
// Rollback rollback to the given version
func Rollback(db *sql.DB, version *int64) error {
if version != nil {
return goose.DownTo(db, MigrationsDir, *version)
}
return goose.Down(db, MigrationsDir)
}
// ResetDB clean and migrate db.
func ResetDB(db *sql.DB) error {
if err := Rollback(db, new(int64)); err != nil {
return err
}
return Migrate(db)
}
// Current get current version
func Current(db *sql.DB) (int64, error) {
return goose.GetDBVersion(db)
}
// Status is normal or not
func Status(db *sql.DB) error {
return goose.Version(db, MigrationsDir)
}
// Create a new migration folder
func Create(db *sql.DB, name, migrationType string) error {
return goose.Create(db, MigrationsDir, name, migrationType)
}
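For reference, a minimal sketch of how a goose-backed helper like the removed package is typically driven. The postgres DSN is a placeholder, and the scroll-tech/database/migrate import path is assumed from the updated test imports further down; only the Migrate and Current functions from the listing above are used.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // postgres driver, as used by the tests

	"scroll-tech/database/migrate" // assumed consolidated location of the migrate package
)

func main() {
	// Placeholder DSN; point it at a real postgres instance.
	db, err := sql.Open("postgres", "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Apply all pending migrations from the embedded migrations/ directory.
	if err := migrate.Migrate(db); err != nil {
		log.Fatal(err)
	}

	// Report the current schema version tracked in the scroll_migrations table.
	version, err := migrate.Current(db)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("schema version:", version)
}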


@@ -1,86 +0,0 @@
package migrate
import (
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
)
var (
base *docker.App
pgDB *sqlx.DB
)
func initEnv(t *testing.T) error {
// Start db container.
base.RunDBImage(t)
// Create db orm handler.
factory, err := database.NewOrmFactory(base.DBConfig)
if err != nil {
return err
}
pgDB = factory.GetDB()
return nil
}
func TestMigrate(t *testing.T) {
base = docker.NewDockerApp()
if err := initEnv(t); err != nil {
t.Fatal(err)
}
t.Run("testCurrent", testCurrent)
t.Run("testStatus", testStatus)
t.Run("testResetDB", testResetDB)
t.Run("testMigrate", testMigrate)
t.Run("testRollback", testRollback)
t.Cleanup(func() {
base.Free()
})
}
func testCurrent(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, 0, int(cur))
}
func testStatus(t *testing.T) {
status := Status(pgDB.DB)
assert.NoError(t, status)
}
func testResetDB(t *testing.T) {
assert.NoError(t, ResetDB(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 5, int(cur))
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, cur > 0)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, version > 0)
assert.NoError(t, Rollback(pgDB.DB, nil))
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
assert.Equal(t, true, cur+1 == version)
}


@@ -1,37 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l1_message
(
queue_index BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
gas_limit BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l1_message_hash_uindex
on l1_message (msg_hash);
create unique index l1_message_nonce_uindex
on l1_message (queue_index);
create index l1_message_height_index
on l1_message (height);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd


@@ -10,13 +10,11 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/config" "scroll-tech/database/migrate"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
var ( var (
@@ -27,10 +25,10 @@ var (
 	chunkOrm      *Chunk
 	batchOrm      *Batch
-	wrappedBlock1 *bridgeTypes.WrappedBlock
-	wrappedBlock2 *bridgeTypes.WrappedBlock
-	chunk1        *bridgeTypes.Chunk
-	chunk2        *bridgeTypes.Chunk
+	wrappedBlock1 *types.WrappedBlock
+	wrappedBlock2 *types.WrappedBlock
+	chunk1        *types.Chunk
+	chunk2        *types.Chunk
 	chunkHash1    common.Hash
 	chunkHash2    common.Hash
 )
@@ -46,8 +44,8 @@ func setupEnv(t *testing.T) {
 	base = docker.NewDockerApp()
 	base.RunDBImage(t)
 	var err error
-	db, err = utils.InitDB(
-		&config.DBConfig{
+	db, err = database.InitDB(
+		&database.Config{
 			DSN:        base.DBConfig.DSN,
 			DriverName: base.DBConfig.DriverName,
 			MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -64,28 +62,22 @@ func setupEnv(t *testing.T) {
 	l2BlockOrm = NewL2Block(db)

 	templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
-	if err != nil {
-		t.Fatalf("failed to read file: %v", err)
-	}
-	wrappedBlock1 = &bridgeTypes.WrappedBlock{}
-	if err = json.Unmarshal(templateBlockTrace, wrappedBlock1); err != nil {
-		t.Fatalf("failed to unmarshal block trace: %v", err)
-	}
+	assert.NoError(t, err)
+	wrappedBlock1 = &types.WrappedBlock{}
+	err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
+	assert.NoError(t, err)

 	templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
-	if err != nil {
-		t.Fatalf("failed to read file: %v", err)
-	}
-	wrappedBlock2 = &bridgeTypes.WrappedBlock{}
-	if err = json.Unmarshal(templateBlockTrace, wrappedBlock2); err != nil {
-		t.Fatalf("failed to unmarshal block trace: %v", err)
-	}
+	assert.NoError(t, err)
+	wrappedBlock2 = &types.WrappedBlock{}
+	err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
+	assert.NoError(t, err)

-	chunk1 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock1}}
+	chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
 	chunkHash1, err = chunk1.Hash(0)
 	assert.NoError(t, err)
-	chunk2 = &bridgeTypes.Chunk{Blocks: []*bridgeTypes.WrappedBlock{wrappedBlock2}}
+	chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
 	chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
 	assert.NoError(t, err)
 }
@@ -102,12 +94,12 @@ func TestL2BlockOrm(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NoError(t, migrate.ResetDB(sqlDB))

-	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
+	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
 	assert.NoError(t, err)

 	height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
 	assert.NoError(t, err)
-	assert.Equal(t, int64(3), height)
+	assert.Equal(t, uint64(3), height)

 	blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
 	assert.NoError(t, err)
@@ -135,9 +127,6 @@ func TestChunkOrm(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NoError(t, migrate.ResetDB(sqlDB))

-	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
-	assert.NoError(t, err)
 	dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
 	assert.NoError(t, err)
 	assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
@@ -177,35 +166,24 @@ func TestBatchOrm(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NoError(t, migrate.ResetDB(sqlDB))

-	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*bridgeTypes.WrappedBlock{wrappedBlock1, wrappedBlock2})
-	assert.NoError(t, err)
-	dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
-	assert.NoError(t, err)
-	assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
-	dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
-	assert.NoError(t, err)
-	assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
-	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*bridgeTypes.Chunk{chunk1})
+	batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
 	assert.NoError(t, err)
 	hash1 := batch1.Hash
 	batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
 	assert.NoError(t, err)
-	batchHeader1, err := bridgeTypes.DecodeBatchHeader(batch1.BatchHeader)
+	batchHeader1, err := types.DecodeBatchHeader(batch1.BatchHeader)
 	assert.NoError(t, err)
 	batchHash1 := batchHeader1.Hash().Hex()
 	assert.Equal(t, hash1, batchHash1)
-	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*bridgeTypes.Chunk{chunk2})
+	batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
 	assert.NoError(t, err)
 	hash2 := batch2.Hash
 	batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
 	assert.NoError(t, err)
-	batchHeader2, err := bridgeTypes.DecodeBatchHeader(batch2.BatchHeader)
+	batchHeader2, err := types.DecodeBatchHeader(batch2.BatchHeader)
 	assert.NoError(t, err)
 	batchHash2 := batchHeader2.Hash().Hex()
 	assert.Equal(t, hash2, batchHash2)
@@ -224,32 +202,11 @@ func TestBatchOrm(t *testing.T) {
 	assert.Equal(t, types.RollupPending, rollupStatus[0])
 	assert.Equal(t, types.RollupPending, rollupStatus[1])

-	err = batchOrm.UpdateProvingStatus(context.Background(), batchHash1, types.ProvingTaskSkipped)
-	assert.NoError(t, err)
-	err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitted)
-	assert.NoError(t, err)
-	err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskFailed)
-	assert.NoError(t, err)
-	err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupCommitted)
-	assert.NoError(t, err)
-	count, err = batchOrm.UpdateSkippedBatches(context.Background())
-	assert.NoError(t, err)
-	assert.Equal(t, uint64(2), count)
-	count, err = batchOrm.UpdateSkippedBatches(context.Background())
-	assert.NoError(t, err)
-	assert.Equal(t, uint64(0), count)
-	batch, err := batchOrm.GetBatchByIndex(context.Background(), 1)
-	assert.NoError(t, err)
-	assert.Equal(t, types.RollupFinalizationSkipped, types.RollupStatus(batch.RollupStatus))

 	err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
 	assert.NoError(t, err)
 	dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
-	assert.Error(t, err, gorm.ErrRecordNotFound)
+	assert.Error(t, err)
 	assert.Nil(t, dbProof)
 	err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)


@@ -1,136 +0,0 @@
package types
import (
"encoding/binary"
"errors"
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
)
const nonZeroByteGas uint64 = 16
const zeroByteGas uint64 = 4
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// NumL1Messages returns the number of L1 messages in this block.
// This number is the sum of included and skipped L1 messages.
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var lastQueueIndex *uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
lastQueueIndex = &txData.Nonce
}
}
if lastQueueIndex == nil {
return 0
}
// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
// TODO: cache results
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
bytes := make([]byte, 60)
if !w.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
if len(w.Transactions) > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
return bytes, nil
}
// EstimateL1CommitCalldataSize calculates the calldata size in l1 commit approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
var size uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
size += uint64(len(txData.Data))
}
return size
}
// EstimateL1CommitGas calculates the calldata gas in l1 commit approximately.
// TODO: This will need to be adjusted.
// The part added here is only the calldata cost,
// but we have execution cost for verifying blocks / chunks / batches and storing the batch hash.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
var total uint64
for _, txData := range w.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
for _, b := range rlpTxData {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
for _, b := range txLen {
if b == 0 {
total += zeroByteGas
} else {
total += nonZeroByteGas
}
}
}
return total
}
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
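The removed NumL1Messages helper above relies on the observation that the last queue index consumed before a block is totalL1MessagePoppedBefore - 1, so a block accounts for lastQueueIndex - totalL1MessagePoppedBefore + 1 messages, skipped ones included. A stand-alone restatement of that arithmetic with a worked example; numL1Messages and the literal values are illustrative, not repository code.

package main

import "fmt"

// numL1Messages restates the arithmetic from the removed NumL1Messages helper:
// given the queue indices of the L1 message transactions in a block and the
// total number of L1 messages popped before this block, the count of included
// plus skipped messages is lastQueueIndex - totalPoppedBefore + 1.
func numL1Messages(queueIndices []uint64, totalPoppedBefore uint64) uint64 {
	if len(queueIndices) == 0 {
		return 0
	}
	last := queueIndices[len(queueIndices)-1]
	return last - totalPoppedBefore + 1
}

func main() {
	// 37 messages were popped before this block, so the next expected queue
	// index is 37. If the block includes messages 38 and 40, then 37 and 39
	// were skipped, and the block accounts for indices 37..40, i.e. 4 messages.
	fmt.Println(numL1Messages([]uint64{38, 40}, 37)) // 4
}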


@@ -1,43 +0,0 @@
package utils
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"scroll-tech/bridge/internal/config"
)
// InitDB init the db handler
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
Logger: logger.Default.LogMode(logger.Info),
})
if err != nil {
return nil, err
}
sqlDB, err := db.DB()
if err != nil {
return nil, err
}
sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)
if err = sqlDB.Ping(); err != nil {
return nil, err
}
return db, nil
}
// CloseDB close the db handler. notice the db handler only can close when then program exit.
func CloseDB(db *gorm.DB) error {
sqlDB, err := db.DB()
if err != nil {
return err
}
if err := sqlDB.Close(); err != nil {
return err
}
return nil
}
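A minimal sketch of how the removed InitDB and CloseDB helpers were typically wired together, using the bridge config and utils packages as they existed before this change (the logic appears to move to scroll-tech/common/database, per the updated imports below). The DSN and pool sizes are placeholders.

package main

import (
	"log"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/utils"
)

func main() {
	// Placeholder DSN and pool sizes; the real values come from the bridge config file.
	db, err := utils.InitDB(&config.DBConfig{
		DSN:        "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable",
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	})
	if err != nil {
		log.Fatal(err)
	}
	// CloseDB should only run when the program exits, as noted in the comment above.
	defer func() {
		if err := utils.CloseDB(db); err != nil {
			log.Fatal(err)
		}
	}()
}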


@@ -60,8 +60,8 @@ contract MockBridgeL1 {
 	/// @notice Emitted when a batch is finalized.
 	/// @param batchHash The hash of the batch
-	/// @param stateRoot The state root in layer 2 after this batch.
-	/// @param withdrawRoot The merkle root in layer2 after this batch.
+	/// @param stateRoot The state root on layer 2 after this batch.
+	/// @param withdrawRoot The merkle root on layer2 after this batch.
 	event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);

 	/***********


@@ -11,12 +11,12 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/docker"
"scroll-tech/database/migrate"
bcmd "scroll-tech/bridge/cmd" bcmd "scroll-tech/bridge/cmd"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge" "scroll-tech/bridge/mock_bridge"
) )
@@ -46,13 +46,13 @@ var (
 )

 func setupDB(t *testing.T) *gorm.DB {
-	cfg := &config.DBConfig{
+	cfg := &database.Config{
 		DSN:        base.DBConfig.DSN,
 		DriverName: base.DBConfig.DriverName,
 		MaxOpenNum: base.DBConfig.MaxOpenNum,
 		MaxIdleNum: base.DBConfig.MaxIdleNum,
 	}
-	db, err := utils.InitDB(cfg)
+	db, err := database.InitDB(cfg)
 	assert.NoError(t, err)
 	sqlDB, err := db.DB()
 	assert.NoError(t, err)


@@ -9,18 +9,17 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
func testImportL1GasPrice(t *testing.T) { func testImportL1GasPrice(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
prepareContracts(t) prepareContracts(t)
@@ -44,10 +43,10 @@ func testImportL1GasPrice(t *testing.T) {
 	l1BlockOrm := orm.NewL1Block(db)

 	// check db status
-	latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
+	latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
 	assert.NoError(t, err)
 	assert.Equal(t, number, latestBlockHeight)
-	blocks, err := l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
+	blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
 	assert.NoError(t, err)
 	assert.Equal(t, len(blocks), 1)
 	assert.Empty(t, blocks[0].OracleTxHash)
@@ -55,7 +54,7 @@ func testImportL1GasPrice(t *testing.T) {
 	// relay gas price
 	l1Relayer.ProcessGasPriceOracle()
-	blocks, err = l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
+	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
 	assert.NoError(t, err)
 	assert.Equal(t, len(blocks), 1)
 	assert.NotEmpty(t, blocks[0].OracleTxHash)
@@ -64,7 +63,7 @@ func testImportL1GasPrice(t *testing.T) {
 func testImportL2GasPrice(t *testing.T) {
 	db := setupDB(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	prepareContracts(t)

 	l2Cfg := bridgeApp.Config.L2Config
@@ -72,8 +71,8 @@ func testImportL2GasPrice(t *testing.T) {
 	assert.NoError(t, err)

 	// add fake chunk
-	chunk := &bridgeTypes.Chunk{
-		Blocks: []*bridgeTypes.WrappedBlock{
+	chunk := &types.Chunk{
+		Blocks: []*types.WrappedBlock{
 			{
 				Header: &gethTypes.Header{
 					Number: big.NewInt(1),
@@ -81,8 +80,8 @@ func testImportL2GasPrice(t *testing.T) {
 					Difficulty: big.NewInt(0),
 					BaseFee:    big.NewInt(0),
 				},
 				Transactions: nil,
-				WithdrawTrieRoot: common.Hash{},
+				WithdrawRoot: common.Hash{},
 			},
 		},
 	}
@@ -90,7 +89,7 @@ func testImportL2GasPrice(t *testing.T) {
 	assert.NoError(t, err)

 	batchOrm := orm.NewBatch(db)
-	_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*bridgeTypes.Chunk{chunk})
+	_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*types.Chunk{chunk})
 	assert.NoError(t, err)

 	// check db status


@@ -11,17 +11,17 @@ import (
"github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
) )
func testRelayL1MessageSucceed(t *testing.T) { func testRelayL1MessageSucceed(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
prepareContracts(t) prepareContracts(t)


@@ -11,16 +11,15 @@ import (
_ "scroll-tech/bridge/cmd/msg_relayer/app" _ "scroll-tech/bridge/cmd/msg_relayer/app"
_ "scroll-tech/bridge/cmd/rollup_relayer/app" _ "scroll-tech/bridge/cmd/rollup_relayer/app"
"scroll-tech/common/database"
cutils "scroll-tech/common/utils" cutils "scroll-tech/common/utils"
"scroll-tech/bridge/internal/utils"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func testProcessStart(t *testing.T) { func testProcessStart(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
bridgeApp.RunApp(t, cutils.EventWatcherApp) bridgeApp.RunApp(t, cutils.EventWatcherApp)
bridgeApp.RunApp(t, cutils.GasOracleApp) bridgeApp.RunApp(t, cutils.GasOracleApp)
@@ -32,7 +31,7 @@ func testProcessStart(t *testing.T) {
 func testProcessStartEnableMetrics(t *testing.T) {
 	db := setupDB(t)
-	defer utils.CloseDB(db)
+	defer database.CloseDB(db)
 	port, err := rand.Int(rand.Reader, big.NewInt(2000))
 	assert.NoError(t, err)


@@ -10,6 +10,7 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types" gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
@@ -17,13 +18,11 @@ import (
"scroll-tech/bridge/internal/controller/relayer" "scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher" "scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm" "scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
) )
func testCommitBatchAndFinalizeBatch(t *testing.T) { func testCommitBatchAndFinalizeBatch(t *testing.T) {
db := setupDB(t) db := setupDB(t)
defer utils.CloseDB(db) defer database.CloseDB(db)
prepareContracts(t) prepareContracts(t)
@@ -37,7 +36,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
 	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)

 	// add some blocks to db
-	var wrappedBlocks []*bridgeTypes.WrappedBlock
+	var wrappedBlocks []*types.WrappedBlock
 	for i := 0; i < 10; i++ {
 		header := gethTypes.Header{
 			Number:     big.NewInt(int64(i)),
@@ -45,10 +44,10 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
 			Difficulty: big.NewInt(0),
 			BaseFee:    big.NewInt(0),
 		}
-		wrappedBlocks = append(wrappedBlocks, &bridgeTypes.WrappedBlock{
+		wrappedBlocks = append(wrappedBlocks, &types.WrappedBlock{
 			Header:       &header,
 			Transactions: nil,
-			WithdrawTrieRoot: common.Hash{},
+			WithdrawRoot: common.Hash{},
 		})
 	}
@@ -105,9 +104,8 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
 	assert.Equal(t, types.RollupCommitted, statuses[0])

 	// add dummy proof
-	proof := &message.AggProof{
+	proof := &message.BatchProof{
 		Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
 	}
 	err = batchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
 	assert.NoError(t, err)


@@ -1,56 +0,0 @@
package utils
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
type ethClient interface {
BlockNumber(ctx context.Context) (uint64, error)
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}
// GetLatestConfirmedBlockNumber get confirmed block number by rpc.BlockNumber type.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
switch true {
case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
var tag *big.Int
if confirm == rpc.FinalizedBlockNumber {
tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
} else {
tag = big.NewInt(int64(rpc.SafeBlockNumber))
}
header, err := client.HeaderByNumber(ctx, tag)
if err != nil {
return 0, err
}
if !header.Number.IsInt64() {
return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
}
return header.Number.Uint64(), nil
case confirm == rpc.LatestBlockNumber:
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
return number, nil
case confirm.Int64() >= 0: // If it's positive integer, consider it as a certain confirm value.
number, err := client.BlockNumber(ctx)
if err != nil {
return 0, err
}
cfmNum := uint64(confirm.Int64())
if number >= cfmNum {
return number - cfmNum, nil
}
return 0, nil
default:
return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
}
}
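A minimal sketch of how the removed GetLatestConfirmedBlockNumber helper is called. The endpoint URL is a placeholder, and *ethclient.Client is assumed to satisfy the unexported ethClient interface above since it provides BlockNumber and HeaderByNumber.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"

	"scroll-tech/bridge/internal/utils"
)

func main() {
	// Placeholder endpoint; any Ethereum JSON-RPC URL works here.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}

	// With a positive confirmation count, the helper returns latest - N
	// (or 0 if the chain is shorter than N blocks).
	confirmed, err := utils.GetLatestConfirmedBlockNumber(context.Background(), client, rpc.BlockNumber(6))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest confirmed block:", confirmed)

	// rpc.FinalizedBlockNumber or rpc.SafeBlockNumber instead resolve the
	// finalized or safe header via HeaderByNumber.
	finalized, err := utils.GetLatestConfirmedBlockNumber(context.Background(), client, rpc.FinalizedBlockNumber)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("finalized block:", finalized)
}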

Some files were not shown because too many files have changed in this diff.