Compare commits


85 Commits

Author SHA1 Message Date
georgehao
2fd6fea95f feat: debug performance 2024-03-26 13:34:53 +08:00
Steven
9e8c3432c3 fix(libzkp): upgrade to use prover v0.9.7 (#989)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
2023-10-13 02:34:56 -05:00
Xi Lin
ff380141a8 feat(contracts): update ScrollOwner initialization script (#987)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-10-12 08:51:20 +02:00
HAOYUatHZ
2216ad4271 refactor: remove rpc-gateway submodule (#985) 2023-10-11 16:57:41 +08:00
colin
25d5fabac9 fix(gas-oracle): fetch block header (#984)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-10-10 17:25:39 +08:00
colin
d494f4419c feat(bridge-history-api): add USDC/LIDO/DAI gateways (#983)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-10-09 20:40:12 +08:00
colin
e843419397 fix(rollup-relayer): better estimate commit batch gas limit (#982)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-10-09 08:31:31 +02:00
colin
badde3cba5 fix(rollup-relayer): better handle commit batch revert (#981)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-10-08 13:39:17 +02:00
colin
caa16e1676 fix(rollup-relayer): address underestimation of commitBatch gas in low-tx batches (#980)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-10-07 15:35:53 +02:00
Xi Lin
7c742da488 feat(contracts): add emergency role to pause L1/L2ScrollMessenger (#975)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-10-07 11:33:09 +02:00
Péter Garamvölgyi
0cdb0dc7a9 feat: remove DAI gateway from deployment script (#979) 2023-10-06 18:03:21 +02:00
Xi Lin
3143373f5f feat(contracts): remove rate limiter related codes (#978) 2023-10-05 21:16:01 +02:00
Steven
18ee6a67c5 fix(libzkp): upgrade libzkp to v0.9.5 (#977)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
2023-10-05 01:53:24 -05:00
Xi Lin
05da46a719 test(contracts): OZ-L01 Insufficient Tests When Using BitMaps (#956)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-10-04 15:59:53 -07:00
Xi Lin
55e0b11d17 fix(contracts): OZ-N03 Inconsistent Usage of msg.sender (#906) 2023-10-04 15:45:17 -07:00
Steven
6f72d0447e fix(libzkp): upgrade libzkp to v0.9.4 (#973)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-10-02 15:39:51 +08:00
georgehao
76d66eba58 feat: add chunk/batch get_task index (#976)
Co-authored-by: maskpp <maskpp266@gmail.com>
2023-09-29 08:55:10 -05:00
colin
c551609e17 feat(watcher&rollup): add block commit calldata size and commit/finalize batch revert metrics (#974)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-29 15:33:08 +08:00
colin
47e5a43646 fix(bridge-history): add cache hit tracing logs (#972) 2023-09-27 13:11:01 +08:00
colin
7604612581 refactor(bridge-history): rename metrics (#971)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-27 01:07:22 +08:00
colin
35d4ec5ad0 feat(bridge-history): add cache hit metrics and cache non-existent keys (#970)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-26 23:43:09 +08:00
colin
8f745e9836 fix(bridge-history): duplicated symbol and metric prefix (#969)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-26 18:52:18 +08:00
colin
4ec1045916 feat(bridge-history): add GIN metrics and separate /ready and /health endpoints to different ports (#968) 2023-09-26 18:10:16 +08:00
colin
f94e21dd45 fix(bridge-history): bump version (#967) 2023-09-26 17:11:44 +08:00
colin
205641a65c pref(bridge-history): add cache in APIs and merge RDS queries (#966)
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2023-09-26 17:03:53 +08:00
Steven
d02f41b2c9 fix(libzkp): upgrade libzkp to v0.9.3 (#965)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
2023-09-25 15:21:23 +08:00
Péter Garamvölgyi
72204358f0 bump version v4.3.19 2023-09-21 23:08:37 +02:00
colin
072bc21d20 fix(CI): golint (#964) 2023-09-21 23:00:31 +02:00
georgehao
f7a2465db8 fix(coordinator): fix can't assigned failure task (#963)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-09-22 02:31:28 +08:00
colin
154ff0c8a0 perf(bridge-history): optimize get claimable l2 sent msg query (#959)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-22 02:17:16 +08:00
georgehao
2b266aaa68 perf(coordinator): optimize coordinator get task's index (#962)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-09-22 01:30:57 +08:00
georgehao
410f14bc7d perf(coordinator): optimize get_task of bad index (#961)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-09-21 23:48:34 +08:00
Xi Lin
4d903bc9b2 feat(contracts): foundry scripts for ScrollOwner (#838)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-09-21 17:45:24 +02:00
georgehao
59a2f1e998 perf(coordinator): optimize get task's optimistic lock (#960)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2023-09-21 22:44:06 +08:00
georgehao
20c5e9855b perf(coordinator): use optimistic lock during batch/chunk assignment (#958)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-09-21 17:03:42 +08:00
Steven
1f2fe74cbe fix(libzkp): set StorageTrace optional in ChunkProof (#953)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-21 12:39:41 +08:00
Xi Lin
0e12661fd5 docs(contracts): OZ-N01 Misleading Documentation (#955)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-21 12:37:56 +08:00
Xi Lin
04e66231e5 fix(contracts): OZ-L02 Implicit Limitation of Withdrawal (#954) 2023-09-20 12:18:19 +02:00
Xi Lin
417a228523 fix(contracts): OZ-L03 Inconsistency of Allowing a Trusted Forwarder (#846)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-09-19 14:50:21 -07:00
georgehao
dcd85b2f56 feat(coordinator): bump version (#952)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-09-19 17:50:25 +08:00
georgehao
afb6476823 feat: fix cleanup bug again (#951) 2023-09-19 17:45:45 +08:00
georgehao
d991d6b99d fix(coordinator): fix clean challenge bug (#950)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-09-19 16:41:33 +08:00
Steven
f0920362c5 fix(libzkp): try to convert to string for panic errors (#949) 2023-09-19 12:47:37 +08:00
Steven
5eed174b9e fix(libzkp): upgrade libzkp to use prover v0.9.1 (#948)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-09-17 08:16:22 +08:00
georgehao
a79992e772 feat(coordinator): clean up challenge (#946)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-17 08:14:47 +08:00
HAOYUatHZ
2a54c8aae6 build: fix Makefile (#947) 2023-09-14 21:58:47 +08:00
Steven
3bd0f2fa38 fix(libzkp): upgrade to use prover v0.9.0 (#945) 2023-09-14 20:01:19 +08:00
Xi Lin
ae95e61902 fix(contracts): OZ-M01 Lack of Expiration for Retrying Transactions (#840)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: icemelon <icemelon@users.noreply.github.com>
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
2023-09-13 12:49:08 -07:00
georgehao
8bae647921 feat: remove prover-stats-api (#944) 2023-09-13 21:03:44 +08:00
maskpp
1f4df55d12 feat: add enable to chain-monitor config (#942)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-09-13 18:04:11 +08:00
dependabot[bot]
f21d4adbfd build(deps): bump @openzeppelin/contracts-upgradeable from 4.9.2 to 4.9.3 in /contracts (#935)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Xi Lin <zimpha@gmail.com>
2023-09-13 09:48:48 +02:00
Xi Lin
145edfccb9 refactor(contracts): merge FeeVault and L2TxFeeVault (#943) 2023-09-13 09:36:53 +02:00
dependabot[bot]
6f26114f23 build(deps): bump @openzeppelin/contracts from 4.9.2 to 4.9.3 in /contracts (#936)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-12 10:38:01 +08:00
Xi Lin
56150da353 docs(contracts): OZ-L03 Missing Docstrings (#940)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-12 10:27:54 +08:00
Péter Garamvölgyi
fc53572334 fix: mark used storage slots in L2ScrollMessenger (#941) 2023-09-11 15:35:18 +02:00
georgehao
0730e91292 feat(observability):add ready&health check api for k8s (#938)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-09-11 19:22:19 +08:00
Xi Lin
4d3ff66446 fix(contracts): OZ-L02 Lack of gap Variable (#929)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-11 11:06:09 +08:00
Xi Lin
ae1cb30ed1 fix(contracts): OZ-M01 L2USDCGateway Is Missing Rate Limiter Functionality (#927)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-11 10:47:14 +08:00
Xi Lin
ad14836796 fix(contracts): Zellic 3.2 Addtional checks could be performed (#892)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-11 10:36:54 +08:00
HAOYUatHZ
9a5517472f perf(db): add idx for chunk_index and batch_index (#937) 2023-09-11 10:03:17 +08:00
colin
1c7490a88e refactor(rollup-relayer): change minGasLimit to fallbackGasLimit (#939)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-08 13:25:40 -07:00
Xi Lin
7559dc42b4 feat(contacts): use bitmap for skipped messages (#893)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
2023-09-07 14:31:06 +08:00
ChuhanJin
49b72bd4e4 docs(bridge-history-api): add readme contents (#922)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-09-07 13:32:43 +08:00
colin
8c41b0b86b test(rollup-relayer): add chunk and batch proposer limit tests (#932)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-07 13:29:08 +08:00
HAOYUatHZ
cd456ee3db feat(prover, coordinator): update vk handling logic (#931)
Co-authored-by: Steven Gu <asongala@163.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-09-07 10:59:47 +08:00
Steven
2b6a3c9874 feat: libzkp v0.8.1 (#894)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-09-07 10:34:35 +08:00
colin
d39db01c5b fix(coordinator): decrease active attempts zero check (#934)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-09-06 18:22:25 +08:00
colin
90e3dddf91 fix: bump version (#933) 2023-09-06 16:08:41 +08:00
georgehao
c6cc51bcfd feat(coordinator): prover task assigned multiple prover (#871)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-06 15:28:31 +08:00
Xi Lin
0b0b84a513 fix(contracts): OZ-N01 Unused Imports (#930)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-06 13:34:29 +08:00
gsovereignty
83c8071123 docs: fix link in readme (#926)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-06 12:00:07 +08:00
Xi Lin
e35de74495 docs(contracts): OZ-L01 Misleading Comment (#928) 2023-09-06 11:59:35 +08:00
Ahmed Castro
33089b829f docs: add code-of-conduct and contributing documents (#722)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-09-05 08:16:31 -07:00
colin
8e27052b36 fix(l1-watcher): soft delete blocks when reorg (#923)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-05 16:32:48 +08:00
colin
25b956f9b5 fix(gas-oracle): fetch base fee from the latest L1 block (#920)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-05 15:21:06 +08:00
colin
f4663fd249 feat(rollup-relayer): add number of blocks per chunk limit (#880)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-09-05 15:00:16 +08:00
Péter Garamvölgyi
c71fa5a5fc style: use MAX_TX_IN_CHUNK env name in deployment scripts (#921) 2023-09-04 14:35:04 +02:00
Péter Garamvölgyi
4af3834e36 test(fee-vault): add new test testCantWithdrawMoreThanBalance (#918)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-04 08:15:26 +02:00
Haichen Shen
9983585bdd docs: Update readme (#919) 2023-09-04 13:56:43 +08:00
Xi Lin
d288b34536 fix(contracts): OZ-L02 Anyone Can Steal ERC-20 Tokens From GasSwap (#844)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: icemelon <icemelon@users.noreply.github.com>
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
2023-09-04 12:31:41 +08:00
Haichen Shen
a2fe246551 docs(rollup): improve readme (#917)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-09-02 23:23:17 +08:00
Péter Garamvölgyi
8699a22fa3 feat(contracts): allow setting withdraw amount in fee vault (#912)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-09-02 02:23:53 -07:00
Xi Lin
d668180e9a fix(contracts): OZ-M04 Use of Non-Production-Ready Trusted Forwarder (#843)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: icemelon <icemelon@users.noreply.github.com>
2023-09-02 02:23:31 -07:00
HAOYUatHZ
d3c2e34650 docs(rollup): fix README (#916) 2023-09-02 15:54:26 +08:00
HAOYUatHZ
38551c4eeb refactor: rename bridge to rollup (#644)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-09-02 15:42:01 +08:00
295 changed files with 6193 additions and 6066 deletions

View File

@@ -1,4 +1,4 @@
name: Bump Version
name: Bump version
on:
pull_request:

View File

@@ -132,24 +132,3 @@ jobs:
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
prover-stats-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push prover-stats-api docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/prover-stats-api.Dockerfile
push: true
tags: scrolltech/prover-stats-api:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

View File

@@ -36,7 +36,7 @@ jobs:
- name: Build prerequisites
run: |
make dev_docker
make -C bridge mock_abi
make -C rollup mock_abi
make -C common/bytecode all
- name: Run integration tests
run: |

View File

@@ -1,80 +0,0 @@
name: ProverStatsAPI
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
defaults:
run:
working-directory: 'prover-stats-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover-stats-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover-stats-api/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -1,4 +1,4 @@
name: Bridge
name: Rollup
on:
push:
@@ -8,11 +8,11 @@ on:
- develop
- alpha
paths:
- 'bridge/**'
- 'rollup/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/bridge.yml'
- '.github/workflows/rollup.yml'
pull_request:
types:
- opened
@@ -20,11 +20,11 @@ on:
- synchronize
- ready_for_review
paths:
- 'bridge/**'
- 'rollup/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/bridge.yml'
- '.github/workflows/rollup.yml'
jobs:
check:
@@ -46,7 +46,7 @@ jobs:
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
working-directory: 'bridge'
working-directory: 'rollup'
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
@@ -64,14 +64,14 @@ jobs:
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
run: goimports -local scroll-tech/bridge/ -w .
working-directory: 'bridge'
run: goimports -local scroll-tech/rollup/ -w .
working-directory: 'rollup'
- name: Run go mod tidy
run: go mod tidy
working-directory: 'bridge'
working-directory: 'rollup'
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'bridge'
working-directory: 'rollup'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
@@ -97,13 +97,13 @@ jobs:
- name: Build prerequisites
run: |
make dev_docker
make -C bridge mock_abi
- name: Build bridge binaries
working-directory: 'bridge'
make -C rollup mock_abi
- name: Build rollup binaries
working-directory: 'rollup'
run: |
make bridge_bins
- name: Test bridge packages
working-directory: 'bridge'
make rollup_bins
- name: Test rollup packages
working-directory: 'rollup'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
@@ -111,7 +111,7 @@ jobs:
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: bridge
flags: rollup
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest

17
.gitignore vendored
View File

@@ -1,9 +1,22 @@
.idea
# Asset files
assets/params*
assets/seed
coverage.txt
# Built binaries
build/bin
coverage.txt
*.integration.txt
# Visual Studio Code
.vscode
# IntelliJ
.idea
# MacOS
.DS_Store
# misc
sftp-config.json
*~

3
.gitmodules vendored
View File

@@ -1,9 +1,6 @@
[submodule "l2geth"]
path = l2geth
url = git@github.com:scroll-tech/go-ethereum.git
[submodule "rpc-gateway"]
path = rpc-gateway
url = git@github.com:scroll-tech/rpc-gateway.git
[submodule "contracts/lib/ds-test"]
path = contracts/lib/ds-test
url = https://github.com/dapphub/ds-test

128
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
contributor@scroll.io.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

42
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,42 @@
## Contributing
[fork]: /fork
[pr]: /compare
[style]: https://standardjs.com/
[code-of-conduct]: CODE_OF_CONDUCT.md
Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for keeping it great.
Please note that this project is released with a [Contributor Code of Conduct][code-of-conduct]. By participating in this project you agree to abide by its terms.
## Contribute to Scroll
Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing Dev Tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits, please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.
## Issues and PRs
If you have suggestions for how this project could be improved, or want to report a bug, open an issue! We'd love all and any contributions. If you have questions, too, we'd love to hear them.
We'd also love PRs. If you're thinking of a large PR, we advise opening up an issue first to talk about it, though! Look at the links below if you're not sure how to open a PR.
## Submitting a pull request
1. [Fork][fork] and clone the repository.
1. Create a new branch: `git checkout -b my-branch-name`.
1. Make your change, add tests, and make sure the tests still pass.
1. Push to your fork and [submit a pull request][pr].
1. Pat yourself on the back and wait for your pull request to be reviewed and merged.
Here are a few things you can do that will increase the likelihood of your pull request being accepted:
- Write and update tests.
- Keep your changes as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
Work-in-progress pull requests are also welcome, so you can get feedback early on or if there is something blocking you.
## Resources
- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
- [GitHub Help](https://help.github.com)

View File

@@ -8,7 +8,7 @@ help: ## Display this help message
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
lint: ## The code's format and security checks.
make -C bridge lint
make -C rollup lint
make -C common lint
make -C coordinator lint
make -C database lint
@@ -17,7 +17,7 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
@@ -25,7 +25,7 @@ update: ## update dependencies
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .

View File

@@ -1,7 +1,33 @@
# Scroll Monorepo
[![rollup](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
[![contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml)
[![bridge-history](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
[![coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
[![prover](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
[![integration](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml)
[![codecov](https://codecov.io/gh/scroll-tech/scroll/branch/develop/graph/badge.svg?token=VJVHNQWGGW)](https://codecov.io/gh/scroll-tech/scroll)
<a href="https://scroll.io">Scroll</a> is a zkRollup Layer 2 dedicated to enhance Ethereum scalability through a bytecode-equivalent [zkEVM](https://github.com/scroll-tech/zkevm-circuits) circuit. This monorepo encompasses essential infrastructure components of the Scroll protocol. It contains the L1 and L2 contracts, the rollup node, the prover client, and the prover coordinator.
## Directory Structure
<pre>
├── <a href="./bridge-history-api/">bridge-history-api</a>: Bridge history service that collects deposit and withdraw events from both L1 and L2 chain and generates withdrawal proofs
├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./src">l2geth</a>: Scroll execution node
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./rollup">rollup</a>: Rollup-related services
├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
└── <a href="./tests">tests</a>: Integration tests
</pre>
## Contributing
We welcome community contributions to this repository. Before you submit any issues or PRs, please read the [Code of Conduct](CODE_OF_CONDUCT.md) and the [Contribution Guideline](CONTRIBUTING.md).
## Prerequisites
+ Go 1.19
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
@@ -15,14 +41,14 @@ docker pull postgres
make dev_docker
```
## Testing Bridge & Coordinator
## Testing Rollup & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -55,7 +81,7 @@ This command runs a Docker container named `scroll_test_container` from the `scr
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -63,6 +89,10 @@ go test -v -race -covermode=atomic scroll-tech/common/...
## Testing Contracts
You can find the unit tests in [`<REPO_DIR>/contracts/src/test/`](/contracts/src/test/), and integration tests in [`<REPO_DIR>/contracts/integration-test/`](/contracts/integration-test/).
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).
For more details on contracts, see [`/contracts`](/contracts).
See [`contracts`](/contracts) for more details on the contracts.
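As a starting point, here is a minimal sketch for running the contract unit tests locally. It assumes the Foundry toolchain (`forge`) is installed and the repository's submodules (e.g. `contracts/lib/ds-test`) are checked out; see [`/contracts`](/contracts) for the authoritative setup and the separate integration-test workflow.
```bash
# Pull in the Solidity test dependencies tracked as git submodules.
git submodule update --init --recursive

# Run the unit tests under contracts/src/test/ with Foundry.
cd contracts
forge test -vvv
```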
## License
Scroll Monorepo is licensed under the [MIT](./LICENSE) license.

View File

@@ -1 +1,82 @@
# bridge-history-api
This directory contains the `bridge-history-api` service, which provides REST APIs for querying transactions that interact with Scroll's official bridge contracts.
## Instructions
The bridge-history-api consists of three distinct components:
### bridgehistoryapi-db-cli
Provides database init, show-version, rollback, and status-check commands.
```
cd ./bridge-history-api
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli [command]
```
### bridgehistoryapi-cross-msg-fetcher
Fetches the bridge transactions from both L1 and L2.
```
cd ./bridge-history-api
make bridgehistoryapi-cross-msg-fetcher
./build/bin/bridgehistoryapi-cross-msg-fetcher
```
### bridgehistoryapi-server
Provides the REST APIs. Please refer to the API details below.
```
cd ./bridge-history-api
make bridgehistoryapi-server
./build/bin/bridgehistoryapi-server
```
## APIs provided by bridgehistoryapi-server
Assume `bridgehistoryapi-server` is listening on `https://localhost:8080`. You can change this port by modifying `config.json`. A usage sketch follows the endpoint list below.
1. `/txs`
```
// @Summary get all txs under given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/txs [get]
```
2. `/txsbyhashes`
```
// @Summary get txs by given tx hashes
// @Accept plain
// @Produce plain
// @Param hashes query string array true "array of hashes list"
// @Success 200
// @Router /api/txsbyhashes [post]
```
3. `/claimable`
```
// @Summary get all claimable txs under given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/claimable [get]
```
4. `/withdraw_root`
```
// @Summary get withdraw_root of given batch index
// @Accept plain
// @Produce plain
// @Param batch_index query string true "batch_index"
// @Success 200
// @Router /api/withdraw_root [get]
```
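For a quick sanity check of the GET endpoints above, here is a minimal usage sketch. It assumes the server is reachable over plain HTTP on `localhost:8080` (adjust if you serve HTTPS or changed the port in `config.json`); the wallet address and batch index are placeholders.
```bash
# Paginated tx history for a wallet address (placeholder address).
curl "http://localhost:8080/api/txs?address=0x0000000000000000000000000000000000000000&page=1&page_size=10"

# Claimable txs for the same address.
curl "http://localhost:8080/api/claimable?address=0x0000000000000000000000000000000000000000&page=1&page_size=10"

# Withdraw root for a given batch index.
curl "http://localhost:8080/api/withdraw_root?batch_index=1"
```
Each call returns a JSON body; `/txsbyhashes` is a POST endpoint and takes the tx hashes in the request body instead of query parameters.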

View File

@@ -7,11 +7,13 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/urfave/cli/v2"
"bridge-history-api/config"
"bridge-history-api/internal/controller"
"bridge-history-api/internal/route"
"bridge-history-api/observability"
"bridge-history-api/utils"
)
@@ -54,13 +56,18 @@ func action(ctx *cli.Context) error {
router := gin.Default()
controller.InitController(db)
route.Route(router, cfg)
registry := prometheus.DefaultRegisterer
route.Route(router, cfg, registry)
go func() {
if runServerErr := router.Run(fmt.Sprintf(":%s", port)); runServerErr != nil {
log.Crit("run http server failure", "error", runServerErr)
}
}()
observability.Server(ctx, db)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)

View File

@@ -82,6 +82,18 @@ func action(ctx *cli.Context) error {
common.HexToAddress(cfg.L1.WETHGatewayAddr),
}
if cfg.L1.USDCGatewayAddr != "" {
l1AddressList = append(l1AddressList, common.HexToAddress(cfg.L1.USDCGatewayAddr))
}
if cfg.L1.LIDOGatewayAddr != "" {
l1AddressList = append(l1AddressList, common.HexToAddress(cfg.L1.LIDOGatewayAddr))
}
if cfg.L2.DAIGatewayAddr != "" {
l1AddressList = append(l1AddressList, common.HexToAddress(cfg.L1.DAIGatewayAddr))
}
l2AddressList := []common.Address{
common.HexToAddress(cfg.L2.CustomERC20GatewayAddr),
common.HexToAddress(cfg.L2.ERC721GatewayAddr),
@@ -92,6 +104,18 @@ func action(ctx *cli.Context) error {
common.HexToAddress(cfg.L2.WETHGatewayAddr),
}
if cfg.L2.USDCGatewayAddr != "" {
l2AddressList = append(l2AddressList, common.HexToAddress(cfg.L2.USDCGatewayAddr))
}
if cfg.L2.LIDOGatewayAddr != "" {
l2AddressList = append(l2AddressList, common.HexToAddress(cfg.L2.LIDOGatewayAddr))
}
if cfg.L2.DAIGatewayAddr != "" {
l2AddressList = append(l2AddressList, common.HexToAddress(cfg.L2.DAIGatewayAddr))
}
l1crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, crossmsg.L1ReorgHandling)
if err != nil {
log.Crit("failed to create l1 cross message fetcher", "error", err)

View File

@@ -1,33 +1,39 @@
{
"batchInfoFetcher": {
"batchIndexStartBlock": 9091265,
"ScrollChainAddr": "0xcD00DB804C819175B381b2B44Aa16A391c8a01D6"
"ScrollChainAddr": "0x1799c3Df650caB9DFBb228C971016707D8f8721D"
},
"l1": {
"confirmation": 64,
"endpoint": "https://rpc.ankr.com/eth_goerli",
"startHeight": 9090194 ,
"endpoint": "https://rpc.ankr.com/eth",
"startHeight": 18310747,
"blockTime": 10,
"MessengerAddr": "0x326517Eb8eB1Ce5eaB5b513C2e9A24839b402d90",
"ETHGatewayAddr": "0x8305cB7B8448677736095965B63d7431017328fe",
"WETHGatewayAddr": "0xe3bA3c60d99a2d9a5f817734bC85353470b23931",
"StandardERC20Gateway": "0x16c1079B27eD9c363B7D08aC5Ae937A398972A5C",
"CustomERC20GatewayAddr": "0x61f08caD3d6F77801167d3bA8669433701586643",
"ERC721GatewayAddr": "0x4A73D25A4C99CB912acaf6C5B5e554f2982201c5",
"ERC1155GatewayAddr": "0xa3F5DD3033698c2832C53f3C3Fe6E062F58cD808"
"MessengerAddr": "0x7318152B19c3c97c886D5ee6C2525E62ce8e2abA",
"ETHGatewayAddr": "0xd165b42d857eae2915625819464a2a1f91E5d0A5",
"WETHGatewayAddr": "0xb0255e4C1a919619D1CafBA51021d638c4F71b89",
"StandardERC20Gateway": "0x00fEc01A9b975bA37466B4E9006dF2C71BFE0e48",
"CustomERC20GatewayAddr": "0xD8874B0E6C3CC43C00B69D60c21Ef452d1159bDe",
"ERC721GatewayAddr": "0x131B46649F6882d686a766cb8b68c4cB0ACdeb24",
"ERC1155GatewayAddr": "0xCeE721789FAA05c7F4463efB664520656aB7C7d5",
"USDCGatewayAddr": "0x37ba659D6CC380D12Fb96567CC52FC8e1DF4E334",
"LIDOGatewayAddr": "0x892dDB2899325aBBA1fD00FDA8249B40Cbbc33F9",
"DAIGatewayAddr": "0xD8dD7787f89c7E6243AD32E0d0cCf460243C8130"
},
"l2": {
"confirmation": 1,
"endpoint": "http://staging-l2geth-rpc0.scroll.tech:8545",
"endpoint": "http://mainnet-l2geth-internal-1.mainnet.scroll.tech:8545",
"blockTime": 3,
"startHeight": 0,
"CustomERC20GatewayAddr": "0x905db21f836749fEeD12de781afc4A5Ab4Dd0d51",
"ERC721GatewayAddr": "0xC53D835514780664BCd7eCfcE7c2E5d9554dc41B",
"StandardERC20Gateway": "0x90271634BCB020e06ea4840C3f7aa61b8F860651",
"MessengerAddr": "0xE8b0956Ac75c65Aa1669e83888DA13afF2E108f4",
"ETHGatewayAddr": "0xD5938590D5dD8ce95812D4D515a219C12C551D67",
"WETHGatewayAddr": "0xb0aaA582564fade4232a16fdB1383004A6A7247F",
"ERC1155GatewayAddr": "0x4f33B1655619c2C0B7C450128Df760B4365Cb549"
"MessengerAddr": "0xda7c91Ed60DACD28Cb97B180108958c9ACC7698a",
"ETHGatewayAddr": "0x567671187b5FFbcDFe0B6EcF3e56C05508a31A87",
"WETHGatewayAddr": "0x3b03aE2F27d62E0B2b6740CA20Fc07Af4338B791",
"StandardERC20Gateway": "0xb00cb1F6f7C43D2EE8C4e2163a6bEA22441A5B7c",
"CustomERC20GatewayAddr": "0x63CCb38E9d21A72777b203267F2e4ba5C974fC62",
"ERC721GatewayAddr": "0xE2c36a2D8B5528719aE7A42A778b2D08b18d134a",
"ERC1155GatewayAddr": "0xfF14870512e42BFb85a9B7bEfDc06e9aB5A37269",
"USDCGatewayAddr": "0x97D5799CDC8eE2A7452913d7548c7cEE285719FA",
"LIDOGatewayAddr": "0xE9c5C9f67ec7B773fC76440845751F657bb953FF",
"DAIGatewayAddr": "0xC5034eB8F682b73F93C9246aa95A8eBbF82793aA"
},
"db": {
"dsn": "postgres://postgres:1234@localhost:5444/test?sslmode=disable",

View File

@@ -31,6 +31,9 @@ type LayerConfig struct {
MessengerAddr string `json:"MessengerAddr"`
ETHGatewayAddr string `json:"ETHGatewayAddr"`
WETHGatewayAddr string `json:"WETHGatewayAddr"`
USDCGatewayAddr string `json:"USDCGatewayAddr"`
LIDOGatewayAddr string `json:"LIDOGatewayAddr"`
DAIGatewayAddr string `json:"DAIGatewayAddr"`
StandardERC20Gateway string `json:"StandardERC20Gateway"`
ERC721GatewayAddr string `json:"ERC721GatewayAddr"`
ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`

View File

@@ -3,15 +3,20 @@ module bridge-history-api
go 1.19
require (
github.com/bits-and-blooms/bitset v1.7.0
github.com/ethereum/go-ethereum v1.12.2
github.com/gin-contrib/cors v1.4.0
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pressly/goose/v3 v3.7.0
github.com/prometheus/client_golang v1.14.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.3.0
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.2
)
@@ -20,7 +25,6 @@ require (
github.com/DataDog/zstd v1.5.2 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.7.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/bytedance/sonic v1.9.2 // indirect
@@ -94,7 +98,6 @@ require (
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
@@ -117,7 +120,6 @@ require (
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/time v0.3.0 // indirect

View File

@@ -119,6 +119,8 @@ github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnR
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
@@ -362,6 +364,8 @@ github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=

View File

@@ -24,14 +24,14 @@ func NewBatchController(db *gorm.DB) *BatchController {
func (b *BatchController) GetWithdrawRootByBatchIndex(ctx *gin.Context) {
var req types.QueryByBatchIndexRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
result, err := b.batchLogic.GetWithdrawRootByBatchIndex(ctx, req.BatchIndex)
if err != nil {
types.RenderJSON(ctx, types.ErrGetWithdrawRootByBatchIndexFailure, err, nil)
types.RenderFailure(ctx, types.ErrGetWithdrawRootByBatchIndexFailure, err)
return
}
types.RenderJSON(ctx, types.Success, nil, result)
types.RenderSuccess(ctx, result)
}

View File

@@ -10,7 +10,8 @@ var (
// HistoryCtrler is controller instance
HistoryCtrler *HistoryController
// BatchCtrler is controller instance
BatchCtrler *BatchController
BatchCtrler *BatchController
initControllerOnce sync.Once
)

View File

@@ -1,23 +1,40 @@
package controller
import (
"errors"
"reflect"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/gin-gonic/gin"
"github.com/patrickmn/go-cache"
"golang.org/x/sync/singleflight"
"gorm.io/gorm"
"bridge-history-api/internal/logic"
"bridge-history-api/internal/types"
)
const (
cacheKeyPrefixClaimableTxsByAddr = "claimableTxsByAddr:"
cacheKeyPrefixQueryTxsByHash = "queryTxsByHash:"
)
// HistoryController contains the query claimable txs service
type HistoryController struct {
historyLogic *logic.HistoryLogic
cache *cache.Cache
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
}
// NewHistoryController return HistoryController instance
func NewHistoryController(db *gorm.DB) *HistoryController {
return &HistoryController{
historyLogic: logic.NewHistoryLogic(db),
cache: cache.New(30*time.Second, 10*time.Minute),
cacheMetrics: initCacheMetrics(),
}
}
@@ -25,48 +42,120 @@ func NewHistoryController(db *gorm.DB) *HistoryController {
func (c *HistoryController) GetAllClaimableTxsByAddr(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
return
}
offset := (req.Page - 1) * req.PageSize
limit := req.PageSize
txs, total, err := c.historyLogic.GetClaimableTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
if err != nil {
types.RenderJSON(ctx, types.ErrGetClaimablesFailure, err, nil)
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: txs, Total: total})
}
cacheKey := cacheKeyPrefixClaimableTxsByAddr + req.Address
if cachedData, found := c.cache.Get(cacheKey); found {
c.cacheMetrics.cacheHits.WithLabelValues("GetAllClaimableTxsByAddr").Inc()
// Log cache hit along with request param.
log.Info("cache hit", "request", req)
if cachedData == nil {
types.RenderSuccess(ctx, &types.ResultData{})
return
} else if resultData, ok := cachedData.(*types.ResultData); ok {
types.RenderSuccess(ctx, resultData)
return
}
// Log error for unexpected type, then fetch data from the database.
log.Error("unexpected type in cache", "expected", "*types.ResultData", "got", reflect.TypeOf(cachedData))
} else {
c.cacheMetrics.cacheMisses.WithLabelValues("GetAllClaimableTxsByAddr").Inc()
// Log cache miss along with request param.
log.Info("cache miss", "request", req)
}
result, err, _ := c.singleFlight.Do(cacheKey, func() (interface{}, error) {
txs, total, err := c.historyLogic.GetClaimableTxsByAddress(ctx, common.HexToAddress(req.Address))
if err != nil {
return nil, err
}
resultData := &types.ResultData{Result: txs, Total: total}
c.cache.Set(cacheKey, resultData, cache.DefaultExpiration)
return resultData, nil
})
// GetAllTxsByAddr defines the http get method behavior
func (c *HistoryController) GetAllTxsByAddr(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
return
}
offset := (req.Page - 1) * req.PageSize
limit := req.PageSize
message, total, err := c.historyLogic.GetTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
if err != nil {
types.RenderJSON(ctx, types.ErrGetTxsByAddrFailure, err, nil)
types.RenderFailure(ctx, types.ErrGetClaimablesFailure, err)
return
}
types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: message, Total: total})
if resultData, ok := result.(*types.ResultData); ok {
types.RenderSuccess(ctx, resultData)
} else {
log.Error("unexpected type from singleflight", "expected", "*types.ResultData", "got", reflect.TypeOf(result))
types.RenderFailure(ctx, types.ErrGetClaimablesFailure, errors.New("unexpected error"))
}
}
// PostQueryTxsByHash defines the http post method behavior
func (c *HistoryController) PostQueryTxsByHash(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
result, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderJSON(ctx, types.ErrGetTxsByHashFailure, err, nil)
if len(req.Txs) > 10 {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, errors.New("the number of hashes in the request exceeds the allowed maximum of 10"))
return
}
types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: result, Total: 0})
hashesMap := make(map[string]struct{}, len(req.Txs))
results := make([]*types.TxHistoryInfo, 0, len(req.Txs))
uncachedHashes := make([]string, 0, len(req.Txs))
for _, hash := range req.Txs {
if _, exists := hashesMap[hash]; exists {
// Skip duplicate tx hash values.
continue
}
hashesMap[hash] = struct{}{}
cacheKey := cacheKeyPrefixQueryTxsByHash + hash
if cachedData, found := c.cache.Get(cacheKey); found {
c.cacheMetrics.cacheHits.WithLabelValues("PostQueryTxsByHash").Inc()
// Log cache hit along with tx hash.
log.Info("cache hit", "tx hash", hash)
if cachedData == nil {
continue
} else if txInfo, ok := cachedData.(*types.TxHistoryInfo); ok {
results = append(results, txInfo)
} else {
log.Error("unexpected type in cache", "expected", "*types.TxHistoryInfo", "got", reflect.TypeOf(cachedData))
uncachedHashes = append(uncachedHashes, hash)
}
} else {
c.cacheMetrics.cacheMisses.WithLabelValues("PostQueryTxsByHash").Inc()
// Log cache miss along with tx hash.
log.Info("cache miss", "tx hash", hash)
uncachedHashes = append(uncachedHashes, hash)
}
}
if len(uncachedHashes) > 0 {
dbResults, err := c.historyLogic.GetTxsByHashes(ctx, uncachedHashes)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashFailure, err)
return
}
resultMap := make(map[string]*types.TxHistoryInfo)
for _, result := range dbResults {
results = append(results, result)
resultMap[result.Hash] = result
}
for _, hash := range uncachedHashes {
cacheKey := cacheKeyPrefixQueryTxsByHash + hash
result, found := resultMap[hash]
if found {
c.cache.Set(cacheKey, result, cache.DefaultExpiration)
} else {
c.cache.Set(cacheKey, nil, cache.DefaultExpiration)
}
}
}
resultData := &types.ResultData{Result: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -0,0 +1,40 @@
package controller
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type cacheMetrics struct {
cacheHits *prometheus.CounterVec
cacheMisses *prometheus.CounterVec
}
var (
initMetricsOnce sync.Once
cm *cacheMetrics
)
func initCacheMetrics() *cacheMetrics {
initMetricsOnce.Do(func() {
cm = &cacheMetrics{
cacheHits: promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "bridge_history_api_cache_hits_total",
Help: "The total number of cache hits",
},
[]string{"api"},
),
cacheMisses: promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "bridge_history_api_cache_misses_total",
Help: "The total number of cache misses",
},
[]string{"api"},
),
}
})
return cm
}

View File

@@ -23,66 +23,101 @@ func NewHistoryLogic(db *gorm.DB) *HistoryLogic {
return logic
}
// getCrossTxClaimInfo get UserClaimInfos by address
func getCrossTxClaimInfo(ctx context.Context, msgHash string, db *gorm.DB) *types.UserClaimInfo {
// updateL2TxClaimInfo updates UserClaimInfos for each transaction history.
func updateL2TxClaimInfo(ctx context.Context, txHistories []*types.TxHistoryInfo, db *gorm.DB) {
l2SentMsgOrm := orm.NewL2SentMsg(db)
rollupOrm := orm.NewRollupBatch(db)
l2sentMsg, err := l2SentMsgOrm.GetL2SentMsgByHash(ctx, msgHash)
if err != nil || l2sentMsg == nil {
log.Debug("getCrossTxClaimInfo failed", "error", err)
return &types.UserClaimInfo{}
}
batch, err := rollupOrm.GetRollupBatchByIndex(ctx, l2sentMsg.BatchIndex)
if err != nil {
log.Debug("getCrossTxClaimInfo failed", "error", err)
return &types.UserClaimInfo{}
}
return &types.UserClaimInfo{
From: l2sentMsg.Sender,
To: l2sentMsg.Target,
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
var l2MsgHashes []string
for _, txHistory := range txHistories {
if !txHistory.IsL1 {
l2MsgHashes = append(l2MsgHashes, txHistory.MsgHash)
}
}
l2sentMsgs, err := l2SentMsgOrm.GetL2SentMsgsByHashes(ctx, l2MsgHashes)
if err != nil || len(l2sentMsgs) == 0 {
log.Debug("GetL2SentMsgsByHashes failed", "l2 sent msgs", l2sentMsgs, "error", err)
return
}
l2MsgMap := make(map[string]*orm.L2SentMsg, len(l2sentMsgs))
var batchIndexes []uint64
for _, l2sentMsg := range l2sentMsgs {
l2MsgMap[l2sentMsg.MsgHash] = l2sentMsg
batchIndexes = append(batchIndexes, l2sentMsg.BatchIndex)
}
batches, err := rollupOrm.GetRollupBatchesByIndexes(ctx, batchIndexes)
if err != nil {
log.Debug("GetRollupBatchesByIndexes failed", "error", err)
return
}
batchMap := make(map[uint64]*orm.RollupBatch, len(batches))
for _, batch := range batches {
batchMap[batch.BatchIndex] = batch
}
for _, txHistory := range txHistories {
if txHistory.IsL1 {
continue
}
l2sentMsg, foundL2SentMsg := l2MsgMap[txHistory.MsgHash]
batch, foundBatch := batchMap[l2sentMsg.BatchIndex]
if foundL2SentMsg && foundBatch {
txHistory.ClaimInfo = &types.UserClaimInfo{
From: l2sentMsg.Sender,
To: l2sentMsg.Target,
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
}
}
}
}
func updateCrossTxHash(ctx context.Context, msgHash string, txInfo *types.TxHistoryInfo, db *gorm.DB) {
func updateCrossTxHashes(ctx context.Context, txHistories []*types.TxHistoryInfo, db *gorm.DB) {
msgHashes := make([]string, len(txHistories))
for i, txHistory := range txHistories {
msgHashes[i] = txHistory.MsgHash
}
relayed := orm.NewRelayedMsg(db)
relayed, err := relayed.GetRelayedMsgByHash(ctx, msgHash)
if err != nil {
log.Debug("updateCrossTxHash failed", "error", err)
return
}
if relayed == nil {
return
}
if relayed.Layer1Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer1Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
if relayed.Layer2Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer2Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
relayedMsgs, err := relayed.GetRelayedMsgsByHashes(ctx, msgHashes)
if err != nil || len(relayedMsgs) == 0 {
log.Debug("GetRelayedMsgsByHashes failed", "msg hashes", msgHashes, "relayed msgs", relayedMsgs, "error", err)
return
}
relayedMsgMap := make(map[string]*orm.RelayedMsg, len(relayedMsgs))
for _, relayedMsg := range relayedMsgs {
relayedMsgMap[relayedMsg.MsgHash] = relayedMsg
}
for _, txHistory := range txHistories {
if relayedMsg, found := relayedMsgMap[txHistory.MsgHash]; found {
txHistory.FinalizeTx.Hash = relayedMsg.Layer1Hash + relayedMsg.Layer2Hash
txHistory.FinalizeTx.BlockNumber = relayedMsg.Height
}
}
}
func updateCrossTxHashesAndL2TxClaimInfo(ctx context.Context, txHistories []*types.TxHistoryInfo, db *gorm.DB) {
updateCrossTxHashes(ctx, txHistories, db)
updateL2TxClaimInfo(ctx, txHistories, db)
}
// GetClaimableTxsByAddress get all claimable txs under given address
func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address common.Address, offset int, limit int) ([]*types.TxHistoryInfo, uint64, error) {
func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address common.Address) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
l2SentMsgOrm := orm.NewL2SentMsg(h.db)
l2CrossMsgOrm := orm.NewCrossMsg(h.db)
total, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressTotalNum(ctx, address.Hex())
if err != nil || total == 0 {
return txHistories, 0, err
}
results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(ctx, address.Hex(), offset, limit)
results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddress(ctx, address.Hex())
if err != nil || len(results) == 0 {
return txHistories, 0, err
}
@@ -102,10 +137,10 @@ func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address com
for _, result := range results {
txInfo := &types.TxHistoryInfo{
Hash: result.TxHash,
MsgHash: result.MsgHash,
IsL1: false,
BlockNumber: result.Height,
FinalizeTx: &types.Finalized{},
ClaimInfo: getCrossTxClaimInfo(ctx, result.MsgHash, h.db),
}
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
txInfo.Amount = crossMsg.Amount
@@ -117,96 +152,36 @@ func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address com
}
txHistories = append(txHistories, txInfo)
}
return txHistories, total, err
}
// GetTxsByAddress get all txs under given address
func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address common.Address, offset int, limit int) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
utilOrm := orm.NewCrossMsg(h.db)
total, err := utilOrm.GetTotalCrossMsgCountByAddress(ctx, address.String())
if err != nil || total == 0 {
return txHistories, 0, err
}
result, err := utilOrm.GetCrossMsgsByAddressWithOffset(ctx, address.String(), offset, limit)
if err != nil {
return nil, 0, err
}
for _, msg := range result {
txHistory := &types.TxHistoryInfo{
Hash: msg.Layer1Hash + msg.Layer2Hash,
Amount: msg.Amount,
To: msg.Target,
L1Token: msg.Layer1Token,
L2Token: msg.Layer2Token,
IsL1: msg.MsgType == int(orm.Layer1Msg),
BlockNumber: msg.Height,
BlockTimestamp: msg.Timestamp,
CreatedAt: msg.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
ClaimInfo: getCrossTxClaimInfo(ctx, msg.MsgHash, h.db),
}
updateCrossTxHash(ctx, msg.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
}
return txHistories, total, nil
updateL2TxClaimInfo(ctx, txHistories, h.db)
return txHistories, uint64(len(results)), err
}
// GetTxsByHashes get tx infos under given tx hashes
func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, hashes []string) ([]*types.TxHistoryInfo, error) {
txHistories := make([]*types.TxHistoryInfo, 0)
CrossMsgOrm := orm.NewCrossMsg(h.db)
for _, hash := range hashes {
l1result, err := CrossMsgOrm.GetL1CrossMsgByHash(ctx, common.HexToHash(hash))
if err != nil {
return nil, err
}
if l1result != nil {
txHistory := &types.TxHistoryInfo{
Hash: l1result.Layer1Hash,
Amount: l1result.Amount,
To: l1result.Target,
IsL1: true,
L1Token: l1result.Layer1Token,
L2Token: l1result.Layer2Token,
BlockNumber: l1result.Height,
BlockTimestamp: l1result.Timestamp,
CreatedAt: l1result.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
}
updateCrossTxHash(ctx, l1result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
l2result, err := CrossMsgOrm.GetL2CrossMsgByHash(ctx, common.HexToHash(hash))
if err != nil {
return nil, err
}
if l2result != nil {
txHistory := &types.TxHistoryInfo{
Hash: l2result.Layer2Hash,
Amount: l2result.Amount,
To: l2result.Target,
IsL1: false,
L1Token: l2result.Layer1Token,
L2Token: l2result.Layer2Token,
BlockNumber: l2result.Height,
BlockTimestamp: l2result.Timestamp,
CreatedAt: l2result.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
ClaimInfo: getCrossTxClaimInfo(ctx, l2result.MsgHash, h.db),
}
updateCrossTxHash(ctx, l2result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
results, err := CrossMsgOrm.GetCrossMsgsByHashes(ctx, hashes)
if err != nil {
return nil, err
}
var txHistories []*types.TxHistoryInfo
for _, result := range results {
txHistory := &types.TxHistoryInfo{
Hash: result.Layer1Hash + result.Layer2Hash,
MsgHash: result.MsgHash,
Amount: result.Amount,
To: result.Target,
L1Token: result.Layer1Token,
L2Token: result.Layer2Token,
IsL1: orm.MsgType(result.MsgType) == orm.Layer1Msg,
BlockNumber: result.Height,
BlockTimestamp: result.Timestamp,
CreatedAt: result.CreatedAt,
FinalizeTx: &types.Finalized{Hash: ""},
}
txHistories = append(txHistories, txHistory)
}
updateCrossTxHashesAndL2TxClaimInfo(ctx, txHistories, h.db)
return txHistories, nil
}

View File

@@ -5,13 +5,15 @@ import (
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"bridge-history-api/config"
"bridge-history-api/internal/controller"
"bridge-history-api/observability"
)
// Route routes the APIs
func Route(router *gin.Engine, conf *config.Config) {
func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
router.Use(cors.New(cors.Config{
AllowOrigins: []string{"*"},
AllowMethods: []string{"GET", "POST", "PUT", "DELETE"},
@@ -20,9 +22,9 @@ func Route(router *gin.Engine, conf *config.Config) {
MaxAge: 12 * time.Hour,
}))
observability.Use(router, "bridge_history_api", reg)
r := router.Group("api/")
r.GET("/txs", controller.HistoryCtrler.GetAllTxsByAddr)
r.POST("/txsbyhashes", controller.HistoryCtrler.PostQueryTxsByHash)
r.GET("/claimable", controller.HistoryCtrler.GetAllClaimableTxsByAddr)
r.GET("/withdraw_root", controller.BatchCtrler.GetWithdrawRootByBatchIndex)
}
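// Illustrative request shapes for the routes above (a sketch, not part of this
// diff): the address-based endpoints bind QueryByAddressRequest, so they are
// called as GET /api/txs?address=0x... and GET /api/claimable?address=0x... ;
// POST /api/txsbyhashes and GET /api/withdraw_root take parameters defined by
// their own request types, which are not shown in this hunk.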

View File

@@ -10,6 +10,8 @@ import (
const (
// Success shows OK.
Success = 0
// InternalServerError shows a fatal error in the server
InternalServerError = 500
// ErrParameterInvalidNo is invalid params
ErrParameterInvalidNo = 40001
// ErrGetClaimablesFailure is getting all claimables txs error
@@ -24,9 +26,7 @@ const (
// QueryByAddressRequest the request parameter of address api
type QueryByAddressRequest struct {
Address string `form:"address" binding:"required"`
Page int `form:"page" binding:"required"`
PageSize int `form:"page_size" binding:"required"`
Address string `form:"address" binding:"required"`
}
// QueryByHashRequest the request parameter of hash api
@@ -78,6 +78,7 @@ type UserClaimInfo struct {
// TxHistoryInfo the schema of tx history infos
type TxHistoryInfo struct {
Hash string `json:"hash"`
MsgHash string `json:"msgHash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
@@ -103,3 +104,28 @@ func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
}
ctx.JSON(http.StatusOK, renderData)
}
// RenderSuccess renders success response with json
func RenderSuccess(ctx *gin.Context, data interface{}) {
RenderJSON(ctx, Success, nil, data)
}
// RenderFailure renders failure response with json
func RenderFailure(ctx *gin.Context, errCode int, err error) {
RenderJSON(ctx, errCode, err, nil)
}
// RenderFatal renders fatal response with json
func RenderFatal(ctx *gin.Context, err error) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: InternalServerError,
ErrMsg: errMsg,
Data: nil,
}
ctx.Set("errcode", InternalServerError)
ctx.JSON(http.StatusInternalServerError, renderData)
}

View File

@@ -1,10 +1,10 @@
package metrics
package observability
import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"scroll-tech/common/metrics/ginmetrics"
"bridge-history-api/observability/ginmetrics"
)
// Use register the gin metric

View File

@@ -0,0 +1,35 @@
package observability
import (
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"bridge-history-api/internal/types"
"bridge-history-api/utils"
)
// ProbesController probe check controller
type ProbesController struct {
db *gorm.DB
}
// NewProbesController returns a ProbesController instance
func NewProbesController(db *gorm.DB) *ProbesController {
return &ProbesController{
db: db,
}
}
// HealthCheck the api controller for health check
func (a *ProbesController) HealthCheck(c *gin.Context) {
if _, err := utils.Ping(a.db); err != nil {
types.RenderFatal(c, err)
return
}
types.RenderSuccess(c, nil)
}
// Ready the api controller for ready check
func (a *ProbesController) Ready(c *gin.Context) {
types.RenderSuccess(c, nil)
}

View File

@@ -0,0 +1,53 @@
package observability
import (
"errors"
"fmt"
"net/http"
"time"
// enable the pprof
_ "net/http/pprof"
"github.com/ethereum/go-ethereum/log"
"github.com/gin-contrib/pprof"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"bridge-history-api/utils"
)
// Server starts the metrics server on the given address. It will be closed when the given
// context is canceled.
func Server(c *cli.Context, db *gorm.DB) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}
r := gin.New()
r.Use(gin.Recovery())
pprof.Register(r)
r.GET("/metrics", func(context *gin.Context) {
promhttp.Handler().ServeHTTP(context.Writer, context.Request)
})
probeController := NewProbesController(db)
r.GET("/health", probeController.HealthCheck)
r.GET("/ready", probeController.Ready)
address := fmt.Sprintf(":%s", c.String(utils.MetricsPort.Name))
server := &http.Server{
Addr: address,
Handler: r,
ReadHeaderTimeout: time.Minute,
}
log.Info("Starting metrics server", "address", address)
go func() {
if runServerErr := server.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
log.Crit("run metrics http server failure", "error", runServerErr)
}
}()
}

View File

@@ -71,6 +71,16 @@ func (r *RollupBatch) GetRollupBatchByIndex(ctx context.Context, index uint64) (
return &result, nil
}
// GetRollupBatchesByIndexes return the rollup batches by indexes
func (r *RollupBatch) GetRollupBatchesByIndexes(ctx context.Context, indexes []uint64) ([]*RollupBatch, error) {
var results []*RollupBatch
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index IN (?)", indexes).Find(&results).Error
if err != nil {
return nil, fmt.Errorf("RollupBatch.GetRollupBatchesByIndexes error: %w", err)
}
return results, nil
}
// InsertRollupBatch batch insert rollup batch into db and return the transaction
func (r *RollupBatch) InsertRollupBatch(ctx context.Context, batches []*RollupBatch, dbTx ...*gorm.DB) error {
if len(batches) == 0 {

View File

@@ -368,3 +368,14 @@ func (c *CrossMsg) GetCrossMsgsByAddressWithOffset(ctx context.Context, sender s
}
return messages, nil
}
// GetCrossMsgsByHashes retrieves a list of cross messages identified by their Layer 1 or Layer 2 hashes.
func (c *CrossMsg) GetCrossMsgsByHashes(ctx context.Context, hashes []string) ([]*CrossMsg, error) {
var results []*CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer1_hash IN (?) OR layer2_hash IN (?)", hashes, hashes).Find(&results).Error
if err != nil {
return nil, fmt.Errorf("CrossMsg.GetCrossMsgsByHashes error: %w", err)
}
return results, nil
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum/log"
@@ -54,6 +55,19 @@ func (l *L2SentMsg) GetL2SentMsgByHash(ctx context.Context, msgHash string) (*L2
return &result, nil
}
// GetL2SentMsgsByHashes get l2 sent msgs by hashes
func (l *L2SentMsg) GetL2SentMsgsByHashes(ctx context.Context, msgHashes []string) ([]*L2SentMsg, error) {
var results []*L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("msg_hash IN (?)", msgHashes).
Find(&results).
Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgsByHashes error: %w", err)
}
return results, nil
}
// GetLatestSentMsgHeightOnL2 get latest sent msg height on l2
func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, error) {
var result L2SentMsg
@@ -72,26 +86,61 @@ func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, err
return result.Height, nil
}
// GetClaimableL2SentMsgByAddressWithOffset get claimable l2 sent msg by address with offset
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressWithOffset(ctx context.Context, address string, offset int, limit int) ([]*L2SentMsg, error) {
var results []*L2SentMsg
err := l.db.WithContext(ctx).Raw(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='' ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset).
Scan(&results).Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressWithOffset error: %w", err)
// GetClaimableL2SentMsgByAddress returns the unclaimed L2 sent messages (those with a message proof and no relayed record) for the given address.
// TODO: Add metrics about the result set sizes (total/claimed/unclaimed messages).
func (l *L2SentMsg) GetClaimableL2SentMsgByAddress(ctx context.Context, address string) ([]*L2SentMsg, error) {
var totalMsgs []*L2SentMsg
db := l.db.WithContext(ctx)
db = db.Table("l2_sent_msg")
db = db.Where("original_sender = ? OR sender = ?", address, address)
db = db.Where("msg_proof != ''")
db = db.Where("deleted_at IS NULL")
db = db.Order("id DESC")
tx := db.Find(&totalMsgs)
if tx.Error != nil || tx.RowsAffected == 0 {
return nil, tx.Error
}
return results, nil
}
// GetClaimableL2SentMsgByAddressTotalNum get claimable l2 sent msg by address total num
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressTotalNum(ctx context.Context, address string) (uint64, error) {
var count uint64
err := l.db.WithContext(ctx).Raw(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='';`, address).
Scan(&count).Error
if err != nil {
return 0, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressTotalNum error: %w", err)
// Note on the use of IN vs VALUES in SQL Queries:
// ------------------------------------------------
// When using the IN predicate with a large list (>100) of values, performance may suffer.
// An alternative approach is to use constant subqueries with the VALUES construct.
// For more details and optimization tips, visit:
// https://postgres.cz/wiki/PostgreSQL_SQL_Tricks_I#Predicate_IN_optimalization
//
// Example using IN:
// SELECT * FROM tab WHERE x IN (1,2,3,...,n); -- where n > 70
//
// Optimized example using VALUES:
// SELECT * FROM tab WHERE x IN (VALUES(10), (20));
//
var valuesStr string
for _, msg := range totalMsgs {
valuesStr += fmt.Sprintf("('%s'),", msg.MsgHash)
}
return count, nil
valuesStr = strings.TrimSuffix(valuesStr, ",")
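// At this point valuesStr has the form ('hash1'),('hash2'),...; the hashes come
// from rows already read out of l2_sent_msg above, not from request input.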
var claimedMsgHashes []string
db = l.db.WithContext(ctx)
db = db.Table("relayed_msg")
db = db.Where(fmt.Sprintf("msg_hash IN (VALUES %s)", valuesStr))
db = db.Where("deleted_at IS NULL")
if err := db.Pluck("msg_hash", &claimedMsgHashes).Error; err != nil {
return nil, err
}
claimedMsgHashSet := make(map[string]struct{})
for _, hash := range claimedMsgHashes {
claimedMsgHashSet[hash] = struct{}{}
}
var unclaimedL2Msgs []*L2SentMsg
for _, msg := range totalMsgs {
if _, found := claimedMsgHashSet[msg.MsgHash]; !found {
unclaimedL2Msgs = append(unclaimedL2Msgs, msg)
}
}
return unclaimedL2Msgs, nil
}
// GetLatestL2SentMsgBatchIndex get latest l2 sent msg batch index

View File

@@ -0,0 +1,77 @@
package orm
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"bridge-history-api/orm/migrate"
"scroll-tech/common/database"
"scroll-tech/common/docker"
)
func TestGetClaimableL2SentMsgByAddress(t *testing.T) {
base := docker.NewDockerApp()
base.RunDBImage(t)
db, err := database.InitDB(
&database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
l2SentMsgOrm := NewL2SentMsg(db)
relayedMsgOrm := NewRelayedMsg(db)
msgs, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddress(context.Background(), "sender1")
assert.NoError(t, err)
assert.Len(t, msgs, 0)
l2SentMsgs := []*L2SentMsg{
{
Sender: "sender1",
MsgHash: "hash1",
MsgProof: "proof1",
Nonce: 0,
},
{
OriginalSender: "sender1",
MsgHash: "hash2",
MsgProof: "proof2",
Nonce: 1,
},
{
OriginalSender: "sender1",
MsgHash: "hash3",
MsgProof: "",
Nonce: 2,
},
}
relayedMsgs := []*RelayedMsg{
{
MsgHash: "hash2",
},
{
MsgHash: "hash3",
},
}
err = l2SentMsgOrm.InsertL2SentMsg(context.Background(), l2SentMsgs)
assert.NoError(t, err)
err = relayedMsgOrm.InsertRelayedMsg(context.Background(), relayedMsgs)
assert.NoError(t, err)
msgs, err = l2SentMsgOrm.GetClaimableL2SentMsgByAddress(context.Background(), "sender1")
assert.NoError(t, err)
assert.Len(t, msgs, 1)
assert.Equal(t, "hash1", msgs[0].MsgHash)
}

View File

@@ -49,6 +49,19 @@ func (r *RelayedMsg) GetRelayedMsgByHash(ctx context.Context, msgHash string) (*
return &result, nil
}
// GetRelayedMsgsByHashes get relayed msg by hash array
func (r *RelayedMsg) GetRelayedMsgsByHashes(ctx context.Context, msgHashes []string) ([]*RelayedMsg, error) {
var results []*RelayedMsg
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
Where("msg_hash IN (?)", msgHashes).
Find(&results).
Error
if err != nil {
return nil, fmt.Errorf("RelayedMsg.GetRelayedMsgsByHashes error: %w", err)
}
return results, nil
}
// GetLatestRelayedHeightOnL1 get latest relayed height on l1
func (r *RelayedMsg) GetLatestRelayedHeightOnL1(ctx context.Context) (uint64, error) {
var result RelayedMsg

View File

@@ -2,6 +2,7 @@ package utils
import (
"context"
"database/sql"
"fmt"
"time"
@@ -67,18 +68,29 @@ func InitDB(config *config.DBConfig) (*gorm.DB, error) {
if err != nil {
return nil, err
}
sqlDB, err := db.DB()
if err != nil {
return nil, err
sqlDB, pingErr := Ping(db)
if pingErr != nil {
return nil, pingErr
}
sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)
return db, nil
}
// Ping check db status
func Ping(db *gorm.DB) (*sql.DB, error) {
sqlDB, err := db.DB()
if err != nil {
return nil, err
}
if err = sqlDB.Ping(); err != nil {
return nil, err
}
return db, nil
return sqlDB, nil
}
// CloseDB closes the db handler. Note that the db handler should only be closed when the program exits.

View File

@@ -14,14 +14,6 @@ import (
"bridge-history-api/orm"
)
// CachedParsedTxCalldata store parsed batch infos
type CachedParsedTxCalldata struct {
CallDataIndex uint64
BatchIndices []uint64
StartBlocks []uint64
EndBlocks []uint64
}
// ParseBackendL1EventLogs parses L1 watched events
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
// Need use contract abi to parse event Log

View File

@@ -1,39 +0,0 @@
# Bridge
This repo contains the Scroll bridge.
## Dependency
+ install `abigen`
``` bash
go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
```
## Build
```bash
make clean
make mock_abi
make bridge_bins
```
## Start
(Note: make sure you use different private keys for different senders in config.json.)
* use default ports and config.json.
```bash
./build/bin/event_watcher --http
./build/bin/gas_oracle --http
./build/bin/rollup_relayer --http
```
* use specified ports and config.json
```bash
./build/bin/event_watcher --config ./config.json --http --http.addr localhost --http.port 8290
./build/bin/gas_oracle --config ./config.json --http --http.addr localhost --http.port 8290
./build/bin/rollup_relayer --config ./config.json --http --http.addr localhost --http.port 8290
```

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/event_watcher/app"
func main() {
app.Run()
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/gas_oracle/app"
func main() {
app.Run()
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/rollup_relayer/app"
func main() {
app.Run()
}

View File

@@ -1,54 +0,0 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l1RelayerMetrics struct {
bridgeL1RelayedMsgsTotal prometheus.Counter
bridgeL1RelayedMsgsFailureTotal prometheus.Counter
bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL1RelayerLastGasPrice prometheus.Gauge
bridgeL1MsgsRelayedConfirmedTotal prometheus.Counter
bridgeL1GasOraclerConfirmedTotal prometheus.Counter
}
var (
initL1RelayerMetricOnce sync.Once
l1RelayerMetric *l1RelayerMetrics
)
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
initL1RelayerMetricOnce.Do(func() {
l1RelayerMetric = &l1RelayerMetrics{
bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_total",
Help: "The total number of the l1 relayed message.",
}),
bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_failure_total",
Help: "The total number of the l1 relayed failure message.",
}),
bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_relayed_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_price_oracler_total",
Help: "The total number of layer1 gas price oracler run total",
}),
bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer1_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l1",
}),
bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_oracler_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
}
})
return l1RelayerMetric
}

View File

@@ -1,84 +0,0 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l2RelayerMetrics struct {
bridgeL2RelayerProcessPendingBatchTotal prometheus.Counter
bridgeL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
bridgeL2RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL2RelayerLastGasPrice prometheus.Gauge
bridgeL2RelayerProcessCommittedBatchesTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
bridgeL2ChainMonitorLatestFailedCall prometheus.Counter
bridgeL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
}
var (
initL2RelayerMetricOnce sync.Once
l2RelayerMetric *l2RelayerMetrics
)
func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
initL2RelayerMetricOnce.Do(func() {
l2RelayerMetric = &l2RelayerMetrics{
bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_total",
Help: "The total number of layer2 process pending batch",
}),
bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_success_total",
Help: "The total number of layer2 process pending success batch",
}),
bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_gas_price_oracler_total",
Help: "The total number of layer2 gas price oracler run total",
}),
bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer2_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l2",
}),
bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_total",
Help: "The total number of layer2 process committed batches run total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_total",
Help: "The total number of layer2 process committed batches finalized total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_success_total",
Help: "The total number of layer2 process committed batches finalized success total",
}),
bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_confirmed_total",
Help: "The total number of layer2 process committed batches confirmed total",
}),
bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_finalized_batches_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
bridgeL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_chain_monitor_latest_failed_batch_call",
Help: "The total number of failed call chain_monitor api",
}),
bridgeL2ChainMonitorLatestFailedBatchStatus: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_chain_monitor_latest_failed_batch_status",
Help: "The total number of failed batch status get from chain_monitor",
}),
}
})
return l2RelayerMetric
}

View File

@@ -1,65 +0,0 @@
package watcher
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)
// TODO: Add unit tests that the limits are enforced correctly.
func testBatchProposer(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
cp.TryProposeChunk()
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()
batchOrm := orm.NewBatch(db)
// get all batches.
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, uint64(0), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
chunkOrm := orm.NewChunk(db)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)
assert.Equal(t, batches[0].Hash, dbChunks[0].BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(dbChunks[0].ProvingStatus))
blockOrm := orm.NewL2Block(db)
blocks, err := blockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, dbChunks[0].Hash, blocks[0].ChunkHash)
assert.Equal(t, dbChunks[0].Hash, blocks[1].ChunkHash)
}

View File

@@ -1,68 +0,0 @@
package watcher
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)
// TODO: Add unit tests that the limits are enforced correctly.
func testChunkProposer(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
cp.TryProposeChunk()
expectedChunk := &types.Chunk{
Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
}
expectedHash, err := expectedChunk.Hash(0)
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
}
func testChunkProposerRowConsumption(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db, nil)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 0)
}

View File

@@ -20,11 +20,10 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
FROM scrolltech/go-rust-builder:go-1.19-rust-nightly-2022-12-10 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/

View File

@@ -3,11 +3,10 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/

View File

@@ -3,11 +3,10 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
@@ -18,7 +17,7 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
# Pull event_watcher into a second stage deploy alpine container
FROM alpine:latest

View File

@@ -3,11 +3,10 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
@@ -18,7 +17,7 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
# Pull gas_oracle into a second stage deploy alpine container
FROM alpine:latest

View File

@@ -1,31 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
# Support mainland environment.
#ENV GOPROXY="https://goproxy.cn,direct"
RUN go mod download -x
# Build prover-stats-api
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/prover-stats-api/cmd/ && go build -v -p 4 -o /bin/prover-stats-api
# Pull prover-stats-api into a second stage deploy alpine container \
FROM alpine:latest
COPY --from=builder /bin/prover-stats-api /bin/
ENTRYPOINT ["prover-stats-api"]

View File

@@ -3,11 +3,10 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
@@ -18,7 +17,7 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy alpine container
FROM alpine:latest

View File

@@ -3,7 +3,7 @@ set -uex
profile_name=$1
exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")
exclude_dirs=("scroll-tech/rollup/cmd" "scroll-tech/rollup/tests" "scroll-tech/rollup/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")
all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"

View File

@@ -6,7 +6,7 @@ flag_management:
default_rules:
carryforward: true
individual_flags:
- name: bridge
- name: rollup
statuses:
- type: project
target: auto

View File

@@ -2,6 +2,7 @@ package database
import (
"context"
"database/sql"
"fmt"
"time"
@@ -63,17 +64,15 @@ func InitDB(config *Config) (*gorm.DB, error) {
if err != nil {
return nil, err
}
sqlDB, err := db.DB()
if err != nil {
return nil, err
sqlDB, pingErr := Ping(db)
if pingErr != nil {
return nil, pingErr
}
sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)
if err = sqlDB.Ping(); err != nil {
return nil, err
}
return db, nil
}
@@ -88,3 +87,16 @@ func CloseDB(db *gorm.DB) error {
}
return nil
}
// Ping check db status
func Ping(db *gorm.DB) (*sql.DB, error) {
sqlDB, err := db.DB()
if err != nil {
return nil, err
}
if err = sqlDB.Ping(); err != nil {
return nil, err
}
return sqlDB, nil
}

View File

@@ -11,6 +11,10 @@ import (
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/scroll-tech/go-ethereum/log"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/version"
)
func TestGormLogger(t *testing.T) {
@@ -33,3 +37,26 @@ func TestGormLogger(t *testing.T) {
gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}
func TestDB(t *testing.T) {
version.Version = "v4.1.98-aaa-bbb-ccc"
base := docker.NewDockerApp()
base.RunDBImage(t)
dbCfg := &Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
var err error
db, err := InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := Ping(db)
assert.NoError(t, err)
assert.NotNil(t, sqlDB)
assert.NoError(t, CloseDB(db))
}

File diff suppressed because it is too large

View File

@@ -8,7 +8,7 @@ edition = "2021"
crate-type = ["cdylib"]
[patch.crates-io]
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v0.17.0" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
@@ -20,9 +20,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.7", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
base64 = "0.13.0"
env_logger = "0.9.0"

View File

@@ -0,0 +1,8 @@
.PHONY: help fmt clippy test test-ci test-all
fmt:
@cargo fmt --all -- --check
clippy:
@cargo check --all-features
@cargo clippy --release -- -D warnings

View File

@@ -1,15 +1,18 @@
use crate::{
types::{CheckChunkProofsResponse, ProofResult},
utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR},
utils::{
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
OUTPUT_DIR,
},
};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
consts::AGG_VK_FILENAME,
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, ChunkHash, ChunkProof,
BatchProof, BlockTrace, ChunkHash, ChunkProof,
};
use std::{cell::OnceCell, env, panic, ptr::null};
use types::eth::BlockTrace;
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
@@ -24,6 +27,12 @@ pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
// The VK file must exist here; inside the prover library it is optional and only logged as a warning.
if !file_exists(assets_dir, &AGG_VK_FILENAME) {
panic!("{} must exist in folder {}", *AGG_VK_FILENAME, assets_dir);
}
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
@@ -47,7 +56,7 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
@@ -58,7 +67,7 @@ pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
let check_result: Result<bool, String> = panic::catch_unwind(|| {
let check_result: Result<bool, String> = panic_catch(|| {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
@@ -94,7 +103,7 @@ pub unsafe extern "C" fn gen_batch_proof(
chunk_hashes: *const c_char,
chunk_proofs: *const c_char,
) -> *const c_char {
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);
@@ -143,7 +152,7 @@ pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let verified = panic::catch_unwind(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
verified.unwrap_or(false) as c_char
}

View File

@@ -1,15 +1,18 @@
use crate::{
types::ProofResult,
utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR},
utils::{
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
OUTPUT_DIR,
},
};
use libc::c_char;
use prover::{
consts::CHUNK_VK_FILENAME,
utils::init_env_and_log,
zkevm::{Prover, Verifier},
ChunkProof,
BlockTrace, ChunkProof,
};
use std::{cell::OnceCell, env, panic, ptr::null};
use types::eth::BlockTrace;
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
@@ -24,7 +27,13 @@ pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_params_dir(params_dir);
// The VK file must exist here; inside the prover library it is optional and only logged as a warning.
if !file_exists(assets_dir, &CHUNK_VK_FILENAME) {
panic!("{} must exist in folder {}", *CHUNK_VK_FILENAME, assets_dir);
}
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
}
@@ -47,7 +56,7 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
@@ -58,7 +67,7 @@ pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
@@ -66,7 +75,7 @@ pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_chunk_proof(block_traces, None, OUTPUT_DIR.as_deref())
.gen_chunk_proof(block_traces, None, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
@@ -93,6 +102,6 @@ pub unsafe extern "C" fn verify_chunk_proof(proof: *const c_char) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
let verified = panic::catch_unwind(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
verified.unwrap_or(false) as c_char
}

View File

@@ -3,6 +3,8 @@ use std::{
env,
ffi::{CStr, CString},
os::raw::c_char,
panic::{catch_unwind, AssertUnwindSafe},
path::PathBuf,
};
// Only used for debugging.
@@ -26,3 +28,22 @@ pub(crate) fn string_to_c_char(string: String) -> *const c_char {
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
CString::new(bytes).unwrap().into_raw()
}
pub(crate) fn file_exists(dir: &str, filename: &str) -> bool {
let mut path = PathBuf::from(dir);
path.push(filename);
path.exists()
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = err.downcast_ref::<&str>() {
s.to_string()
} else {
format!("unable to get panic info {err:?}")
}
})
}

View File

@@ -0,0 +1,57 @@
package ginmetrics
import (
"github.com/bits-and-blooms/bitset"
)
const defaultSize = 2 << 24
var seeds = []uint{7, 11, 13, 31, 37, 61}
// BloomFilter a simple bloom filter
type BloomFilter struct {
Set *bitset.BitSet
Funcs [6]simpleHash
}
// NewBloomFilter new a BloomFilter
func NewBloomFilter() *BloomFilter {
bf := new(BloomFilter)
for i := 0; i < len(bf.Funcs); i++ {
bf.Funcs[i] = simpleHash{defaultSize, seeds[i]}
}
bf.Set = bitset.New(defaultSize)
return bf
}
// Add a value to BloomFilter
func (bf *BloomFilter) Add(value string) {
for _, f := range bf.Funcs {
bf.Set.Set(f.hash(value))
}
}
// Contains check the value is in bloom filter
func (bf *BloomFilter) Contains(value string) bool {
if value == "" {
return false
}
ret := true
for _, f := range bf.Funcs {
ret = ret && bf.Set.Test(f.hash(value))
}
return ret
}
type simpleHash struct {
Cap uint
Seed uint
}
func (s *simpleHash) hash(value string) uint {
var result uint = 0
for i := 0; i < len(value); i++ {
result = result*s.Seed + uint(value[i])
}
return (s.Cap - 1) & result
}
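// Minimal usage sketch (illustrative, not part of this diff), mirroring how the
// gin middleware uses the filter to count unique client IPs. Contains may return
// false positives, and the (s.Cap-1)&result mask relies on defaultSize being a
// power of two.
//
//	bf := NewBloomFilter()
//	bf.Add("203.0.113.7")
//	_ = bf.Contains("203.0.113.7")  // true
//	_ = bf.Contains("198.51.100.1") // almost certainly false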

View File

@@ -0,0 +1,89 @@
package ginmetrics
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
)
// Metric defines a metric object. Users can use it to save
// metric data. Every metric should be globally unique by name.
type Metric struct {
Type MetricType
Name string
Description string
Labels []string
Buckets []float64
Objectives map[float64]float64
vec prometheus.Collector
}
// SetGaugeValue set data for Gauge type Metric.
func (m *Metric) SetGaugeValue(labelValues []string, value float64) error {
if m.Type == None {
return fmt.Errorf("metric %s not existed", m.Name)
}
if m.Type != Gauge {
return fmt.Errorf("metric %s not Gauge type", m.Name)
}
m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Set(value)
return nil
}
// Inc increases value for Counter/Gauge type metric, increments
// the counter by 1
func (m *Metric) Inc(labelValues []string) error {
if m.Type == None {
return fmt.Errorf("metric %s not existed", m.Name)
}
if m.Type != Gauge && m.Type != Counter {
return fmt.Errorf("metric %s not Gauge or Counter type", m.Name)
}
switch m.Type {
case Counter:
m.vec.(*prometheus.CounterVec).WithLabelValues(labelValues...).Inc()
case Gauge:
m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Inc()
}
return nil
}
// Add adds the given value to the Metric object. Only
// for Counter/Gauge type metric.
func (m *Metric) Add(labelValues []string, value float64) error {
if m.Type == None {
return fmt.Errorf("metric %s not existed", m.Name)
}
if m.Type != Gauge && m.Type != Counter {
return fmt.Errorf("metric %s not Gauge or Counter type", m.Name)
}
switch m.Type {
case Counter:
m.vec.(*prometheus.CounterVec).WithLabelValues(labelValues...).Add(value)
case Gauge:
m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Add(value)
}
return nil
}
// Observe is used by Histogram and Summary type metric to
// add observations.
func (m *Metric) Observe(labelValues []string, value float64) error {
if m.Type == None {
return fmt.Errorf("metric %s not existed", m.Name)
}
if m.Type != Histogram && m.Type != Summary {
return fmt.Errorf("metric %s not Histogram or Summary type", m.Name)
}
switch m.Type {
case Histogram:
m.vec.(*prometheus.HistogramVec).WithLabelValues(labelValues...).Observe(value)
case Summary:
m.vec.(*prometheus.SummaryVec).WithLabelValues(labelValues...).Observe(value)
}
return nil
}

View File

@@ -0,0 +1,155 @@
package ginmetrics
import (
"fmt"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
metricRequestTotal = "request_total"
metricRequestUVTotal = "request_uv_total"
metricURIRequestTotal = "uri_request_total"
metricRequestBody = "request_body_total"
metricResponseBody = "response_body_total"
metricRequestDuration = "request_duration"
metricSlowRequest = "slow_request_total"
bloomFilter *BloomFilter
)
// Use set gin metrics middleware
func (m *Monitor) Use(r gin.IRoutes) {
m.initGinMetrics()
r.Use(m.monitorInterceptor)
r.GET(m.metricPath, func(ctx *gin.Context) {
promhttp.Handler().ServeHTTP(ctx.Writer, ctx.Request)
})
}
// UseWithoutExposingEndpoint adds the monitor interceptor to a gin router.
// It can be called multiple times to intercept multiple gin.IRoutes.
// The metrics HTTP path is not registered here; use the Expose function for that.
func (m *Monitor) UseWithoutExposingEndpoint(r gin.IRoutes) {
m.initGinMetrics()
r.Use(m.monitorInterceptor)
}
// Expose adds the metric path to a given router.
// The router can be different from the one passed to UseWithoutExposingEndpoint,
// which allows exposing metrics on a different port.
func (m *Monitor) Expose(r gin.IRoutes) {
r.GET(m.metricPath, func(ctx *gin.Context) {
promhttp.Handler().ServeHTTP(ctx.Writer, ctx.Request)
})
}
// initGinMetrics used to init gin metrics
func (m *Monitor) initGinMetrics() {
bloomFilter = NewBloomFilter()
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricRequestTotal,
Description: "all the server received request num.",
Labels: nil,
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricRequestUVTotal,
Description: "all the server received ip num.",
Labels: nil,
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricURIRequestTotal,
Description: "all the server received request num with every uri.",
Labels: []string{"uri", "method", "code"},
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricRequestBody,
Description: "the server received request body size, unit byte",
Labels: nil,
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricResponseBody,
Description: "the server send response body size, unit byte",
Labels: nil,
})
_ = monitor.AddMetric(&Metric{
Type: Histogram,
Name: metricRequestDuration,
Description: "the time server took to handle the request.",
Labels: []string{"uri"},
Buckets: m.reqDuration,
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricSlowRequest,
Description: fmt.Sprintf("the server handled slow requests counter, t=%d.", m.slowTime),
Labels: []string{"uri", "method", "code"},
})
}
// monitorInterceptor as gin monitor middleware.
func (m *Monitor) monitorInterceptor(ctx *gin.Context) {
if ctx.Request.URL.Path == m.metricPath {
ctx.Next()
return
}
startTime := time.Now()
// execute normal process.
ctx.Next()
// after request
m.ginMetricHandle(ctx, startTime)
}
func (m *Monitor) ginMetricHandle(ctx *gin.Context, start time.Time) {
r := ctx.Request
w := ctx.Writer
//set request total
_ = m.GetMetric(metricRequestTotal).Inc(nil)
// set uv
if clientIP := ctx.ClientIP(); !bloomFilter.Contains(clientIP) {
bloomFilter.Add(clientIP)
_ = m.GetMetric(metricRequestUVTotal).Inc(nil)
}
errCode := strconv.Itoa(ctx.GetInt("errcode"))
// ctx.GetInt returns 0 when "errcode" is unset, so the string is never empty;
// fall back to the HTTP status when no non-zero errcode was recorded.
if errCode == "0" {
errCode = strconv.Itoa(w.Status())
}
// set uri request total
_ = m.GetMetric(metricURIRequestTotal).Inc([]string{ctx.FullPath(), r.Method, errCode})
// set request body size
// since r.ContentLength can be negative (in some occasions) guard the operation
if r.ContentLength >= 0 {
_ = m.GetMetric(metricRequestBody).Add(nil, float64(r.ContentLength))
}
// set slow request
latency := time.Since(start)
if int32(latency.Seconds()) > m.slowTime {
_ = m.GetMetric(metricSlowRequest).Inc([]string{ctx.FullPath(), r.Method, strconv.Itoa(w.Status())})
}
// set request duration
_ = m.GetMetric(metricRequestDuration).Observe([]string{ctx.FullPath()}, latency.Seconds())
// set response size
if w.Size() > 0 {
_ = m.GetMetric(metricResponseBody).Add(nil, float64(w.Size()))
}
}

View File

@@ -0,0 +1,158 @@
package ginmetrics
import (
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// MetricType define metric type
type MetricType int
const (
// None unknown metric type
None MetricType = iota
// Counter MetricType
Counter
// Gauge MetricType
Gauge
// Histogram MetricType
Histogram
// Summary MetricType
Summary
defaultMetricPath = "/debug/metrics"
defaultSlowTime = int32(5)
)
var (
defaultDuration = []float64{0.1, 0.3, 1.2, 5, 10}
monitor *Monitor
promTypeHandler = map[MetricType]func(metric *Metric, reg prometheus.Registerer){
Counter: counterHandler,
Gauge: gaugeHandler,
Histogram: histogramHandler,
Summary: summaryHandler,
}
)
// Monitor is an object that uses to set gin server monitor.
type Monitor struct {
slowTime int32
metricPath string
reqDuration []float64
metrics map[string]*Metric
register prometheus.Registerer
}
// GetMonitor used to get global Monitor object,
// this function returns a singleton object.
func GetMonitor(reg prometheus.Registerer) *Monitor {
if monitor == nil {
monitor = &Monitor{
metricPath: defaultMetricPath,
slowTime: defaultSlowTime,
reqDuration: defaultDuration,
metrics: make(map[string]*Metric),
register: reg,
}
}
return monitor
}
// GetMetric used to get metric object by metric_name.
func (m *Monitor) GetMetric(name string) *Metric {
if metric, ok := m.metrics[name]; ok {
return metric
}
return &Metric{}
}
// SetMetricPath set metricPath property. metricPath is used for Prometheus
// to get gin server monitoring data.
func (m *Monitor) SetMetricPath(path string) {
m.metricPath = path
}
// SetSlowTime set slowTime property. slowTime is used to determine whether
// the request is slow. For "gin_slow_request_total" metric.
func (m *Monitor) SetSlowTime(slowTime int32) {
m.slowTime = slowTime
}
// SetDuration sets the reqDuration property. reqDuration is used as the bucket
// boundaries for the request duration metric.
func (m *Monitor) SetDuration(duration []float64) {
m.reqDuration = duration
}
// SetMetricPrefix set the metric prefix
func (m *Monitor) SetMetricPrefix(prefix string) {
metricRequestTotal = prefix + metricRequestTotal
metricRequestUVTotal = prefix + metricRequestUVTotal
metricURIRequestTotal = prefix + metricURIRequestTotal
metricRequestBody = prefix + metricRequestBody
metricResponseBody = prefix + metricResponseBody
metricRequestDuration = prefix + metricRequestDuration
metricSlowRequest = prefix + metricSlowRequest
}
// SetMetricSuffix set the metric suffix
func (m *Monitor) SetMetricSuffix(suffix string) {
metricRequestTotal += suffix
metricRequestUVTotal += suffix
metricURIRequestTotal += suffix
metricRequestBody += suffix
metricResponseBody += suffix
metricRequestDuration += suffix
metricSlowRequest += suffix
}
// AddMetric add custom monitor metric.
func (m *Monitor) AddMetric(metric *Metric) error {
if _, ok := m.metrics[metric.Name]; ok {
return fmt.Errorf("metric %s is existed", metric.Name)
}
if metric.Name == "" {
return errors.New("metric name cannot be empty")
}
if f, ok := promTypeHandler[metric.Type]; ok {
f(metric, m.register)
m.metrics[metric.Name] = metric
}
return nil
}
func counterHandler(metric *Metric, register prometheus.Registerer) {
metric.vec = promauto.With(register).NewCounterVec(
prometheus.CounterOpts{Name: metric.Name, Help: metric.Description},
metric.Labels,
)
}
func gaugeHandler(metric *Metric, register prometheus.Registerer) {
metric.vec = promauto.With(register).NewGaugeVec(
prometheus.GaugeOpts{Name: metric.Name, Help: metric.Description},
metric.Labels,
)
}
func histogramHandler(metric *Metric, register prometheus.Registerer) {
metric.vec = promauto.With(register).NewHistogramVec(
prometheus.HistogramOpts{Name: metric.Name, Help: metric.Description, Buckets: metric.Buckets},
metric.Labels,
)
}
func summaryHandler(metric *Metric, register prometheus.Registerer) {
// assign the created vector so Observe can find it (matching the other handlers)
metric.vec = promauto.With(register).NewSummaryVec(
prometheus.SummaryOpts{Name: metric.Name, Help: metric.Description, Objectives: metric.Objectives},
metric.Labels,
)
}
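// Usage sketch (illustrative, not part of this diff): a custom metric is
// registered once through AddMetric and then updated via GetMetric. The metric
// name below is hypothetical.
//
//	m := GetMonitor(prometheus.DefaultRegisterer)
//	_ = m.AddMetric(&Metric{
//		Type:        Counter,
//		Name:        "example_jobs_total",
//		Description: "number of example jobs handled.",
//		Labels:      []string{"status"},
//	})
//	_ = m.GetMetric("example_jobs_total").Inc([]string{"ok"})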

View File

@@ -0,0 +1,18 @@
package observability
import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"scroll-tech/common/observability/ginmetrics"
)
// Use register the gin metric
func Use(router *gin.Engine, metricsPrefix string, reg prometheus.Registerer) {
m := ginmetrics.GetMonitor(reg)
m.SetMetricPath("/metrics")
m.SetMetricPrefix(metricsPrefix + "_")
m.SetSlowTime(1)
m.SetDuration([]float64{0.025, .05, .1, .5, 1, 5, 10})
m.UseWithoutExposingEndpoint(router)
}
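A hedged sketch of wiring this middleware into a service; the service name and port are illustrative, while the `Use` signature and metric names come from the code above.

```go
package main

import (
	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"

	"scroll-tech/common/observability"
)

func main() {
	reg := prometheus.NewRegistry()
	router := gin.New()

	// With the prefix "coordinator", built-in metrics such as gin_slow_request_total
	// are exposed as coordinator_gin_slow_request_total.
	observability.Use(router, "coordinator", reg)

	_ = router.Run(":8080") // illustrative listen address
}
```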

View File

@@ -0,0 +1,35 @@
package observability
import (
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/types"
)
// ProbesController is the controller serving the liveness and readiness probes.
type ProbesController struct {
db *gorm.DB
}
// NewProbesController returns a ProbesController instance.
func NewProbesController(db *gorm.DB) *ProbesController {
return &ProbesController{
db: db,
}
}
// HealthCheck is the handler for the health-check probe; it fails when the database cannot be pinged.
func (a *ProbesController) HealthCheck(c *gin.Context) {
if _, err := database.Ping(a.db); err != nil {
types.RenderFatal(c, err)
return
}
types.RenderSuccess(c, nil)
}
// Ready is the handler for the readiness probe.
func (a *ProbesController) Ready(c *gin.Context) {
types.RenderSuccess(c, nil)
}
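For reference, a minimal sketch of the same probe wiring that the metrics Server function in the next diff performs, shown in isolation; the listen address and the nil database placeholder are illustrative.

```go
package main

import (
	"github.com/gin-gonic/gin"
	"gorm.io/gorm"

	"scroll-tech/common/observability"
)

// registerProbes mounts the liveness and readiness probes on a router.
func registerProbes(r *gin.Engine, db *gorm.DB) {
	probes := observability.NewProbesController(db)
	r.GET("/health", probes.HealthCheck)
	r.GET("/ready", probes.Ready)
}

func main() {
	r := gin.New()
	var db *gorm.DB // in a real service this comes from database initialization
	registerProbes(r, db)
	_ = r.Run(":8080")
}
```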

View File

@@ -1,4 +1,4 @@
package metrics
package observability
import (
"errors"
@@ -11,17 +11,17 @@ import (
"github.com/gin-contrib/pprof"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"scroll-tech/common/utils"
)
// Server starts the metrics server on the given address; the server is closed when
// the given context is canceled.
func Server(c *cli.Context, reg *prometheus.Registry) {
func Server(c *cli.Context, db *gorm.DB) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}
@@ -33,6 +33,10 @@ func Server(c *cli.Context, reg *prometheus.Registry) {
promhttp.Handler().ServeHTTP(context.Writer, context.Request)
})
probeController := NewProbesController(db)
r.GET("/health", probeController.HealthCheck)
r.GET("/ready", probeController.Ready)
address := fmt.Sprintf(":%s", c.String(utils.MetricsPort.Name))
server := &http.Server{
Addr: address,

View File

@@ -15,9 +15,16 @@ import (
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates keccak256 hash gas.
// GetKeccak256Gas calculates the gas cost for computing the keccak256 hash of a given size.
func GetKeccak256Gas(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
}
// GetMemoryExpansionCost calculates the cost of memory expansion for a given memoryByteSize.
func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
memorySizeWord := (memoryByteSize + 31) / 32
memoryCost := (memorySizeWord*memorySizeWord)/512 + (3 * memorySizeWord)
return memoryCost
}
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
@@ -131,6 +138,12 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
total += GetMemoryExpansionCost(36) * numL1Messages // staticcall to proxy
total += 100 * numL1Messages // read admin in proxy
total += 100 * numL1Messages // read impl in proxy
total += 100 * numL1Messages // access impl
total += GetMemoryExpansionCost(36) * numL1Messages // delegatecall to impl
return total
}
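To make the new per-message terms concrete, here is a small worked sketch of the formulas above; the functions are local copies of `GetMemoryExpansionCost` and `GetKeccak256Gas` from this diff, not imports from the repository.

```go
package main

import "fmt"

// Local copy of GetMemoryExpansionCost from the diff above.
func getMemoryExpansionCost(memoryByteSize uint64) uint64 {
	memorySizeWord := (memoryByteSize + 31) / 32
	return (memorySizeWord*memorySizeWord)/512 + 3*memorySizeWord
}

// Local copy of GetKeccak256Gas from the diff above.
func getKeccak256Gas(size uint64) uint64 {
	return getMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
}

func main() {
	// A 36-byte call payload occupies 2 words: (2*2)/512 + 3*2 = 6 gas, which is the
	// per-message expansion term charged for each staticcall/delegatecall above.
	fmt.Println(getMemoryExpansionCost(36)) // 6

	// Hashing 96 bytes: expansion (3 words -> 9 gas) + 30 + 6*3 = 57 gas.
	fmt.Println(getKeccak256Gas(96)) // 57
}
```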

View File

@@ -46,7 +46,7 @@ func TestChunkEncode(t *testing.T) {
},
}
assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
assert.Equal(t, uint64(6006), chunk.EstimateL1CommitGas())
assert.Equal(t, uint64(6042), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString := hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -68,7 +68,7 @@ func TestChunkEncode(t *testing.T) {
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(5002), chunk.EstimateL1CommitGas())
assert.Equal(t, uint64(5329), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -84,7 +84,7 @@ func TestChunkEncode(t *testing.T) {
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(9958), chunk.EstimateL1CommitGas())
assert.Equal(t, uint64(10612), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)

View File

@@ -103,6 +103,12 @@ const (
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
// ProverTaskFailureTypeTimeout prover task failure of timeout
ProverTaskFailureTypeTimeout
// ProverTaskFailureTypeSubmitStatusNotOk prover task failure because the submit-proof status was not ok
ProverTaskFailureTypeSubmitStatusNotOk
// ProverTaskFailureTypeVerifiedFailed prover task failure because proof verification failed on the coordinator
ProverTaskFailureTypeVerifiedFailed
// ProverTaskFailureTypeServerError prover task failure because a server-side error occurred while handling the task
ProverTaskFailureTypeServerError
)
func (r ProverTaskFailureType) String() string {
@@ -111,8 +117,14 @@ func (r ProverTaskFailureType) String() string {
return "prover task failure undefined"
case ProverTaskFailureTypeTimeout:
return "prover task failure timeout"
case ProverTaskFailureTypeSubmitStatusNotOk:
return "prover task failure validated submit proof status not ok"
case ProverTaskFailureTypeVerifiedFailed:
return "prover task failure verified failed"
case ProverTaskFailureTypeServerError:
return "prover task failure server exception"
default:
return "illegal prover task failure type"
return fmt.Sprintf("illegal prover task failure type (%d)", int32(r))
}
}

View File

@@ -3,12 +3,21 @@ package types
const (
// Success shows OK.
Success = 0
// InternalServerError shows a fatal error in the server
InternalServerError = 500
// ErrJWTCommonErr jwt common error
ErrJWTCommonErr = 50000
// ErrJWTTokenExpired jwt token expired
ErrJWTTokenExpired = 50001
// ErrProverStatsAPIParameterInvalidNo indicates invalid request parameters
ErrProverStatsAPIParameterInvalidNo = 10001
// ErrProverStatsAPIProverTaskFailure indicates a failure when fetching prover tasks
ErrProverStatsAPIProverTaskFailure = 10002
// ErrProverStatsAPIProverTotalRewardFailure indicates a failure when fetching total rewards
ErrProverStatsAPIProverTotalRewardFailure = 10003
// ErrCoordinatorParameterInvalidNo indicates invalid request parameters
ErrCoordinatorParameterInvalidNo = 20001
// ErrCoordinatorGetTaskFailure indicates a failure when fetching prover tasks

View File

@@ -263,7 +263,7 @@ type ChunkInfo struct {
// ChunkProof includes the proof info that are required for chunk verification and rollup.
type ChunkProof struct {
StorageTrace []byte `json:"storage_trace"`
StorageTrace []byte `json:"storage_trace,omitempty"`
Protocol []byte `json:"protocol"`
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`

common/types/response.go
View File

@@ -0,0 +1,53 @@
package types
import (
"net/http"
"github.com/gin-gonic/gin"
)
// Response is the common JSON response schema.
type Response struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data interface{} `json:"data"`
}
// RenderJSON renders a JSON response with the given error code, error and data.
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: errCode,
ErrMsg: errMsg,
Data: data,
}
ctx.JSON(http.StatusOK, renderData)
}
// RenderSuccess renders a successful JSON response.
func RenderSuccess(ctx *gin.Context, data interface{}) {
RenderJSON(ctx, Success, nil, data)
}
// RenderFailure renders a failed JSON response with the given error code.
func RenderFailure(ctx *gin.Context, errCode int, err error) {
RenderJSON(ctx, errCode, err, nil)
}
// RenderFatal renders an internal server error JSON response with HTTP status 500.
func RenderFatal(ctx *gin.Context, err error) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: InternalServerError,
ErrMsg: errMsg,
Data: nil,
}
ctx.Set("errcode", InternalServerError)
ctx.JSON(http.StatusInternalServerError, renderData)
}
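A minimal sketch of a gin handler using these helpers together with the error codes added above; the handler, route, and `loadRewards` call are hypothetical, while `RenderSuccess`, `RenderFailure`, and `ErrProverStatsAPIProverTotalRewardFailure` come from this diff.

```go
package main

import (
	"errors"

	"github.com/gin-gonic/gin"

	"scroll-tech/common/types"
)

// getTotalRewards is a hypothetical handler illustrating the success/failure helpers.
func getTotalRewards(c *gin.Context) {
	rewards, err := loadRewards() // hypothetical data-access call
	if err != nil {
		// Pairs a domain error code from the constants above with the underlying error.
		types.RenderFailure(c, types.ErrProverStatsAPIProverTotalRewardFailure, err)
		return
	}
	types.RenderSuccess(c, rewards)
}

func loadRewards() (uint64, error) {
	return 0, errors.New("not implemented in this sketch")
}

func main() {
	r := gin.New()
	r.GET("/api/total_rewards", getTotalRewards) // illustrative route
	_ = r.Run(":8080")
}
```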

View File

@@ -1,7 +1,6 @@
package version
import (
"strconv"
"strings"
)
@@ -20,36 +19,3 @@ func CheckScrollProverVersion(proverVersion string) bool {
// compare the `scroll_prover` version
return remote[2] == local[2]
}
// CheckScrollProverVersionTag check the "scroll-prover" version's tag, if it's too old, return false
func CheckScrollProverVersionTag(proverVersion string) bool {
// note the the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
remoteTagNums := strings.Split(strings.TrimPrefix(remote[0], "v"), ".")
if len(remoteTagNums) != 3 {
return false
}
remoteTagMajor, err := strconv.Atoi(remoteTagNums[0])
if err != nil {
return false
}
remoteTagMinor, err := strconv.Atoi(remoteTagNums[1])
if err != nil {
return false
}
remoteTagPatch, err := strconv.Atoi(remoteTagNums[2])
if err != nil {
return false
}
if remoteTagMajor < 4 {
return false
}
if remoteTagMinor == 1 && remoteTagPatch < 98 {
return false
}
return true
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.2.16"
var tag = "v4.3.35"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -260,23 +260,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -371,22 +354,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -563,23 +530,6 @@ Emitted when some ERC1155 token is refunded.
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -227,23 +227,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -502,23 +469,6 @@ Emitted when some ERC721 token is refunded.
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -93,28 +93,6 @@ Mapping from L1 message hash to drop status.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isL1MessageSent
```solidity
function isL1MessageSent(bytes32) external view returns (bool)
```
Mapping from L1 message hash to sent status.
#### Parameters
| Name | Type | Description |
@@ -183,6 +161,28 @@ The address of L1MessageQueue contract.
|---|---|---|
| _0 | address | undefined |
### messageSendTimestamp
```solidity
function messageSendTimestamp(bytes32) external view returns (uint256)
```
Mapping from L1 message hash to the timestamp when the message is sent.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### owner
```solidity
@@ -239,23 +239,6 @@ Mapping from queue index to previous replay queue index.
|---|---|---|
| _0 | uint256 | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessageWithProof
```solidity
@@ -453,22 +436,6 @@ Update max replay times.
|---|---|---|
| _newMaxReplayTimes | uint256 | The new max replay times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### xDomainMessageSender
```solidity
@@ -642,22 +609,5 @@ Emitted when the maximum number of times each message can be replayed is updated
| oldMaxReplayTimes | uint256 | undefined |
| newMaxReplayTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -225,23 +225,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -292,22 +275,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -405,22 +372,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -223,23 +223,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -290,22 +273,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -403,22 +370,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -205,23 +205,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -488,23 +455,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -174,23 +174,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -263,22 +246,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -430,23 +397,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -70,28 +70,6 @@ Mapping from L1 message hash to a boolean value indicating if the message has be
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isL2MessageSent
```solidity
function isL2MessageSent(bytes32) external view returns (bool)
```
Mapping from L2 message hash to sent status.
#### Parameters
| Name | Type | Description |
@@ -121,6 +99,28 @@ The address of L2MessageQueue.
|---|---|---|
| _0 | address | undefined |
### messageSendTimestamp
```solidity
function messageSendTimestamp(bytes32) external view returns (uint256)
```
Mapping from L2 message hash to the timestamp when the message is sent.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### owner
```solidity
@@ -155,23 +155,6 @@ function paused() external view returns (bool)
|---|---|---|
| _0 | bool | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessage
```solidity
@@ -290,22 +273,6 @@ Update fee vault contract.
|---|---|---|
| _newFeeVault | address | The address of new fee vault contract. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### xDomainMessageSender
```solidity
@@ -479,22 +446,5 @@ Emitted when the maximum number of times each message can fail in L2 is updated.
| oldMaxFailedExecutionTimes | uint256 | undefined |
| newMaxFailedExecutionTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -139,23 +139,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -223,22 +206,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -354,23 +321,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -172,23 +172,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -239,22 +222,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -370,23 +337,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -10,6 +10,38 @@ This contract maintains data for the Scroll rollup.
## Methods
### addProver
```solidity
function addProver(address _account) external nonpayable
```
Add an account to the prover list.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _account | address | The address of account to add. |
### addSequencer
```solidity
function addSequencer(address _account) external nonpayable
```
Add an account to the sequencer list.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _account | address | The address of account to add. |
### commitBatch
```solidity
@@ -113,7 +145,7 @@ Import layer 2 genesis block
### initialize
```solidity
function initialize(address _messageQueue, address _verifier, uint256 _maxNumL2TxInChunk) external nonpayable
function initialize(address _messageQueue, address _verifier, uint256 _maxNumTxInChunk) external nonpayable
```
@@ -126,7 +158,7 @@ function initialize(address _messageQueue, address _verifier, uint256 _maxNumL2T
|---|---|---|
| _messageQueue | address | undefined |
| _verifier | address | undefined |
| _maxNumL2TxInChunk | uint256 | undefined |
| _maxNumTxInChunk | uint256 | undefined |
### isBatchFinalized
@@ -228,10 +260,10 @@ The chain id of the corresponding layer 2 chain.
|---|---|---|
| _0 | uint64 | undefined |
### maxNumL2TxInChunk
### maxNumTxInChunk
```solidity
function maxNumL2TxInChunk() external view returns (uint256)
function maxNumTxInChunk() external view returns (uint256)
```
The maximum number of transactions allowed in each chunk.
@@ -279,6 +311,55 @@ function owner() external view returns (address)
|---|---|---|
| _0 | address | undefined |
### paused
```solidity
function paused() external view returns (bool)
```
*Returns true if the contract is paused, and false otherwise.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### removeProver
```solidity
function removeProver(address _account) external nonpayable
```
Remove an account from the prover list.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _account | address | The address of account to remove. |
### removeSequencer
```solidity
function removeSequencer(address _account) external nonpayable
```
Remove an account from the sequencer list.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _account | address | The address of account to remove. |
### renounceOwnership
```solidity
@@ -287,7 +368,7 @@ function renounceOwnership() external nonpayable
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions anymore. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby removing any functionality that is only available to the owner.*
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### revertBatch
@@ -298,7 +379,7 @@ function revertBatch(bytes _batchHeader, uint256 _count) external nonpayable
Revert a pending batch.
*one can only revert unfinalized batches.*
*If the owner wants to revert a sequence of batches by sending multiple transactions, make sure to revert recent batches first.*
#### Parameters
@@ -307,6 +388,22 @@ Revert a pending batch.
| _batchHeader | bytes | undefined |
| _count | uint256 | undefined |
### setPause
```solidity
function setPause(bool _status) external nonpayable
```
Pause or unpause the contract
#### Parameters
| Name | Type | Description |
|---|---|---|
| _status | bool | The pause status to update. |
### transferOwnership
```solidity
@@ -323,13 +420,13 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateMaxNumL2TxInChunk
### updateMaxNumTxInChunk
```solidity
function updateMaxNumL2TxInChunk(uint256 _maxNumL2TxInChunk) external nonpayable
function updateMaxNumTxInChunk(uint256 _maxNumTxInChunk) external nonpayable
```
Update the value of `maxNumL2TxInChunk`.
Update the value of `maxNumTxInChunk`.
@@ -337,41 +434,7 @@ Update the value of `maxNumL2TxInChunk`.
| Name | Type | Description |
|---|---|---|
| _maxNumL2TxInChunk | uint256 | The new value of `maxNumL2TxInChunk`. |
### updateProver
```solidity
function updateProver(address _account, bool _status) external nonpayable
```
Update the status of prover.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _account | address | The address of account to update. |
| _status | bool | The status of the account to update. |
### updateSequencer
```solidity
function updateSequencer(address _account, bool _status) external nonpayable
```
Update the status of sequencer.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _account | address | The address of account to update. |
| _status | bool | The status of the account to update. |
| _maxNumTxInChunk | uint256 | The new value of `maxNumTxInChunk`. |
### updateVerifier
@@ -435,7 +498,7 @@ Return the message root of a committed batch.
### CommitBatch
```solidity
event CommitBatch(bytes32 indexed batchHash)
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash)
```
Emitted when a new batch is committed.
@@ -446,12 +509,13 @@ Emitted when a new batch is committed.
| Name | Type | Description |
|---|---|---|
| batchIndex `indexed` | uint256 | undefined |
| batchHash `indexed` | bytes32 | undefined |
### FinalizeBatch
```solidity
event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot)
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot)
```
Emitted when a batch is finalized.
@@ -462,10 +526,27 @@ Emitted when a batch is finalized.
| Name | Type | Description |
|---|---|---|
| batchIndex `indexed` | uint256 | undefined |
| batchHash `indexed` | bytes32 | undefined |
| stateRoot | bytes32 | undefined |
| withdrawRoot | bytes32 | undefined |
### Initialized
```solidity
event Initialized(uint8 version)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
@@ -483,10 +564,26 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### Paused
```solidity
event Paused(address account)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| account | address | undefined |
### RevertBatch
```solidity
event RevertBatch(bytes32 indexed batchHash)
event RevertBatch(uint256 indexed batchIndex, bytes32 indexed batchHash)
```
Emitted when a pending batch is reverted.
@@ -497,15 +594,16 @@ revert a pending batch.
| Name | Type | Description |
|---|---|---|
| batchIndex `indexed` | uint256 | undefined |
| batchHash `indexed` | bytes32 | undefined |
### UpdateMaxNumL2TxInChunk
### Unpaused
```solidity
event UpdateMaxNumL2TxInChunk(uint256 oldMaxNumL2TxInChunk, uint256 newMaxNumL2TxInChunk)
event Unpaused(address account)
```
Emitted when the value of `maxNumL2TxInChunk` is updated.
@@ -513,8 +611,24 @@ Emitted when the value of `maxNumL2TxInChunk` is updated.
| Name | Type | Description |
|---|---|---|
| oldMaxNumL2TxInChunk | uint256 | The old value of `maxNumL2TxInChunk`. |
| newMaxNumL2TxInChunk | uint256 | The new value of `maxNumL2TxInChunk`. |
| account | address | undefined |
### UpdateMaxNumTxInChunk
```solidity
event UpdateMaxNumTxInChunk(uint256 oldMaxNumTxInChunk, uint256 newMaxNumTxInChunk)
```
Emitted when the value of `maxNumTxInChunk` is updated.
#### Parameters
| Name | Type | Description |
|---|---|---|
| oldMaxNumTxInChunk | uint256 | The old value of `maxNumTxInChunk`. |
| newMaxNumTxInChunk | uint256 | The new value of `maxNumTxInChunk`. |
### UpdateProver
@@ -553,7 +667,7 @@ Emitted when owner updates the status of sequencer.
### UpdateVerifier
```solidity
event UpdateVerifier(address oldVerifier, address newVerifier)
event UpdateVerifier(address indexed oldVerifier, address indexed newVerifier)
```
Emitted when the address of rollup verifier is updated.
@@ -564,8 +678,8 @@ Emitted when the address of rollup verifier is updated.
| Name | Type | Description |
|---|---|---|
| oldVerifier | address | The address of old rollup verifier. |
| newVerifier | address | The address of new rollup verifier. |
| oldVerifier `indexed` | address | The address of old rollup verifier. |
| newVerifier `indexed` | address | The address of new rollup verifier. |

View File

@@ -54,7 +54,7 @@ describe("EnforcedTxGateway.spec", async () => {
await queue.initialize(constants.AddressZero, constants.AddressZero, gateway.address, oracle.address, 10000000);
await gateway.initialize(queue.address, feeVault.address);
await oracle.initialize(21000, 0, 8, 16);
await oracle.initialize(21000, 51000, 8, 16);
const Whitelist = await ethers.getContractFactory("Whitelist", deployer);
const whitelist = await Whitelist.deploy(deployer.address);

View File

@@ -1,7 +1,7 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { ethers } from "hardhat";
import { GasSwap, MinimalForwarder, MockERC20, MockGasSwapTarget } from "../typechain";
import { GasSwap, ERC2771Forwarder, MockERC20, MockGasSwapTarget } from "../typechain";
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
import { expect } from "chai";
import { BigNumber, constants } from "ethers";
@@ -11,7 +11,7 @@ describe("GasSwap.spec", async () => {
let deployer: SignerWithAddress;
let signer: SignerWithAddress;
let forwarder: MinimalForwarder;
let forwarder: ERC2771Forwarder;
let swap: GasSwap;
let target: MockGasSwapTarget;
let token: MockERC20;
@@ -19,8 +19,8 @@ describe("GasSwap.spec", async () => {
beforeEach(async () => {
[deployer, signer] = await ethers.getSigners();
const MinimalForwarder = await ethers.getContractFactory("MinimalForwarder", deployer);
forwarder = await MinimalForwarder.deploy();
const ERC2771Forwarder = await ethers.getContractFactory("ERC2771Forwarder", deployer);
forwarder = await ERC2771Forwarder.deploy("ERC2771Forwarder");
await forwarder.deployed();
const GasSwap = await ethers.getContractFactory("GasSwap", deployer);
@@ -253,12 +253,13 @@ describe("GasSwap.spec", async () => {
await swap.updateFeeRatio(ethers.utils.parseEther(feeRatio).div(100));
const fee = amountOut.mul(feeRatio).div(100);
const req = {
const reqWithoutSignature = {
from: signer.address,
to: swap.address,
value: constants.Zero,
gas: 1000000,
nonce: 0,
nonce: await forwarder.nonces(signer.address),
deadline: 2000000000,
data: swap.interface.encodeFunctionData("swap", [
{
token: token.address,
@@ -278,8 +279,8 @@ describe("GasSwap.spec", async () => {
const signature = await signer._signTypedData(
{
name: "MinimalForwarder",
version: "0.0.1",
name: "ERC2771Forwarder",
version: "1",
chainId: (await ethers.provider.getNetwork()).chainId,
verifyingContract: forwarder.address,
},
@@ -305,17 +306,29 @@ describe("GasSwap.spec", async () => {
name: "nonce",
type: "uint256",
},
{
name: "deadline",
type: "uint48",
},
{
name: "data",
type: "bytes",
},
],
},
req
reqWithoutSignature
);
const balanceBefore = await signer.getBalance();
await forwarder.execute(req, signature);
await forwarder.execute({
from: reqWithoutSignature.from,
to: reqWithoutSignature.to,
value: reqWithoutSignature.value,
gas: reqWithoutSignature.gas,
deadline: reqWithoutSignature.deadline,
data: reqWithoutSignature.data,
signature,
});
const balanceAfter = await signer.getBalance();
expect(balanceAfter.sub(balanceBefore)).to.eq(amountOut.sub(fee));
expect(await token.balanceOf(signer.address)).to.eq(amountIn.mul(refundRatio).div(100));

View File

@@ -42,7 +42,7 @@ describe("L1MessageQueue", async () => {
deployer
);
await oracle.initialize(21000, 0, 8, 16);
await oracle.initialize(21000, 50000, 8, 16);
await queue.initialize(messenger.address, scrollChain.address, gateway.address, oracle.address, 10000000);
});
@@ -264,8 +264,8 @@ describe("L1MessageQueue", async () => {
});
it("should succeed", async () => {
// append 100 messages
for (let i = 0; i < 100; i++) {
// append 512 messages
for (let i = 0; i < 256 * 2; i++) {
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
}
@@ -274,7 +274,8 @@ describe("L1MessageQueue", async () => {
.to.emit(queue, "DequeueTransaction")
.withArgs(0, 50, 0);
for (let i = 0; i < 50; i++) {
expect(await queue.getCrossDomainMessage(i)).to.eq(constants.HashZero);
expect(await queue.isMessageSkipped(i)).to.eq(false);
expect(await queue.isMessageDropped(i)).to.eq(false);
}
expect(await queue.pendingQueueIndex()).to.eq(50);
@@ -284,7 +285,8 @@ describe("L1MessageQueue", async () => {
.withArgs(50, 10, 1023);
expect(await queue.pendingQueueIndex()).to.eq(60);
for (let i = 50; i < 60; i++) {
expect(BigNumber.from(await queue.getCrossDomainMessage(i))).to.gt(constants.Zero);
expect(await queue.isMessageSkipped(i)).to.eq(true);
expect(await queue.isMessageDropped(i)).to.eq(false);
}
// pop 20 messages, skip first 5
@@ -293,12 +295,76 @@ describe("L1MessageQueue", async () => {
.withArgs(60, 20, 31);
expect(await queue.pendingQueueIndex()).to.eq(80);
for (let i = 60; i < 65; i++) {
expect(BigNumber.from(await queue.getCrossDomainMessage(i))).to.gt(constants.Zero);
expect(await queue.isMessageSkipped(i)).to.eq(true);
expect(await queue.isMessageDropped(i)).to.eq(false);
}
for (let i = 65; i < 80; i++) {
expect(await queue.getCrossDomainMessage(i)).to.eq(constants.HashZero);
expect(await queue.isMessageSkipped(i)).to.eq(false);
expect(await queue.isMessageDropped(i)).to.eq(false);
}
// pop 256 messages with random skip
const bitmap = BigNumber.from("0x496525059c3f33758d17030403e45afe067b8a0ae1317cda0487fd2932cbea1a");
const tx = await queue.connect(scrollChain).popCrossDomainMessage(80, 256, bitmap);
await expect(tx).to.emit(queue, "DequeueTransaction").withArgs(80, 256, bitmap);
console.log("gas used:", (await tx.wait()).gasUsed.toString());
for (let i = 80; i < 80 + 256; i++) {
expect(await queue.isMessageSkipped(i)).to.eq(
bitmap
.shr(i - 80)
.and(1)
.eq(1)
);
expect(await queue.isMessageDropped(i)).to.eq(false);
}
});
// @note skip this random benchmark tests
for (const count1 of [1, 2, 128, 129, 256]) {
for (const count2 of [1, 2, 128, 129, 256]) {
for (const count3 of [1, 2, 128, 129, 256]) {
it.skip(`should succeed on random tests, pop three times each with ${count1} ${count2} ${count3} msgs`, async () => {
// append count1 + count2 + count3 messages
for (let i = 0; i < count1 + count2 + count3; i++) {
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
}
// first pop `count1` messages
const bitmap1 = BigNumber.from(randomBytes(32));
let tx = await queue.connect(scrollChain).popCrossDomainMessage(0, count1, bitmap1);
await expect(tx)
.to.emit(queue, "DequeueTransaction")
.withArgs(0, count1, bitmap1.and(constants.One.shl(count1).sub(1)));
for (let i = 0; i < count1; i++) {
expect(await queue.isMessageSkipped(i)).to.eq(bitmap1.shr(i).and(1).eq(1));
expect(await queue.isMessageDropped(i)).to.eq(false);
}
// then pop `count2` messages
const bitmap2 = BigNumber.from(randomBytes(32));
tx = await queue.connect(scrollChain).popCrossDomainMessage(count1, count2, bitmap2);
await expect(tx)
.to.emit(queue, "DequeueTransaction")
.withArgs(count1, count2, bitmap2.and(constants.One.shl(count2).sub(1)));
for (let i = 0; i < count2; i++) {
expect(await queue.isMessageSkipped(i + count1)).to.eq(bitmap2.shr(i).and(1).eq(1));
expect(await queue.isMessageDropped(i + count1)).to.eq(false);
}
// last pop `count3` messages
const bitmap3 = BigNumber.from(randomBytes(32));
tx = await queue.connect(scrollChain).popCrossDomainMessage(count1 + count2, count3, bitmap3);
await expect(tx)
.to.emit(queue, "DequeueTransaction")
.withArgs(count1 + count2, count3, bitmap3.and(constants.One.shl(count3).sub(1)));
for (let i = 0; i < count3; i++) {
expect(await queue.isMessageSkipped(i + count1 + count2)).to.eq(bitmap3.shr(i).and(1).eq(1));
expect(await queue.isMessageDropped(i + count1 + count2)).to.eq(false);
}
});
}
}
}
});
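The skip-bitmap semantics these assertions rely on can be summarized in a few lines; the sketch below (in Go, mirroring the bit arithmetic in the test) is illustrative and not code from this repository.

```go
package main

import (
	"fmt"
	"math/big"
)

// isSkipped reports whether the message at absolute queue index i is marked as
// skipped by a popCrossDomainMessage call starting at startIndex with the given
// 256-bit skip bitmap: bit (i - startIndex) of the bitmap selects it.
func isSkipped(bitmap *big.Int, startIndex, i uint64) bool {
	return bitmap.Bit(int(i-startIndex)) == 1
}

func main() {
	// Same bitmap the test passes when popping 256 messages starting at index 80.
	bitmap, _ := new(big.Int).SetString("496525059c3f33758d17030403e45afe067b8a0ae1317cda0487fd2932cbea1a", 16)
	fmt.Println(isSkipped(bitmap, 80, 80), isSkipped(bitmap, 80, 81))
}
```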
context("#dropCrossDomainMessage", async () => {
@@ -308,7 +374,7 @@ describe("L1MessageQueue", async () => {
);
});
it("should revert, when drop executed message", async () => {
it("should revert, when drop non-skipped message", async () => {
// append 10 messages
for (let i = 0; i < 10; i++) {
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
@@ -318,14 +384,13 @@ describe("L1MessageQueue", async () => {
.to.emit(queue, "DequeueTransaction")
.withArgs(0, 5, 0);
for (let i = 0; i < 5; i++) {
expect(await queue.getCrossDomainMessage(i)).to.eq(constants.HashZero);
expect(await queue.isMessageSkipped(i)).to.eq(false);
expect(await queue.isMessageDropped(i)).to.eq(false);
}
expect(await queue.pendingQueueIndex()).to.eq(5);
for (let i = 0; i < 5; i++) {
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.revertedWith(
"message already dropped or executed"
);
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.revertedWith("drop non-skipped message");
}
// drop pending message
@@ -345,9 +410,12 @@ describe("L1MessageQueue", async () => {
.withArgs(0, 10, 0x3ff);
for (let i = 0; i < 10; i++) {
expect(BigNumber.from(await queue.getCrossDomainMessage(i))).to.gt(constants.Zero);
expect(await queue.isMessageSkipped(i)).to.eq(true);
expect(await queue.isMessageDropped(i)).to.eq(false);
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.emit(queue, "DropTransaction").withArgs(i);
expect(await queue.getCrossDomainMessage(i)).to.eq(constants.HashZero);
await expect(queue.connect(messenger).dropCrossDomainMessage(i)).to.revertedWith("message already dropped");
expect(await queue.isMessageSkipped(i)).to.eq(true);
expect(await queue.isMessageDropped(i)).to.eq(true);
}
});
});

View File

@@ -55,8 +55,8 @@
"typescript": "^4.5.2"
},
"dependencies": {
"@openzeppelin/contracts": "^v4.9.2",
"@openzeppelin/contracts-upgradeable": "^v4.9.2"
"@openzeppelin/contracts": "^v4.9.3",
"@openzeppelin/contracts-upgradeable": "^v4.9.3"
},
"lint-staged": {
"*.{js,ts}": "npx eslint --cache --fix",

View File

@@ -17,7 +17,6 @@ import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
import {L1DAIGateway} from "../../src/L1/gateways/L1DAIGateway.sol";
import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
@@ -27,7 +26,7 @@ import {ZkEvmVerifierV1} from "../../src/libraries/verifier/ZkEvmVerifierV1.sol"
contract DeployL1BridgeContracts is Script {
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
uint32 CHAIN_ID_L2 = uint32(vm.envUint("CHAIN_ID_L2"));
uint64 CHAIN_ID_L2 = uint64(vm.envUint("CHAIN_ID_L2"));
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
@@ -56,7 +55,6 @@ contract DeployL1BridgeContracts is Script {
deployL1CustomERC20Gateway();
deployL1ERC721Gateway();
deployL1ERC1155Gateway();
deployL1DAIGateway();
vm.stopBroadcast();
}
@@ -204,18 +202,6 @@ contract DeployL1BridgeContracts is Script {
logAddress("L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR", address(proxy));
}
function deployL1DAIGateway() internal {
L1DAIGateway impl = new L1DAIGateway();
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(
address(impl),
address(proxyAdmin),
new bytes(0)
);
logAddress("L1_DAI_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
logAddress("L1_DAI_GATEWAY_PROXY_ADDR", address(proxy));
}
function deployL1ERC721Gateway() internal {
L1ERC721Gateway impl = new L1ERC721Gateway();
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(

View File

@@ -0,0 +1,66 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.10;
import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";
import {TimelockController} from "@openzeppelin/contracts/governance/TimelockController.sol";
import {ScrollOwner} from "../../src/misc/ScrollOwner.sol";
// solhint-disable state-visibility
// solhint-disable var-name-mixedcase
contract DeployL1ScrollOwner is Script {
string NETWORK = vm.envString("NETWORK");
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
address SCROLL_MULTISIG_ADDR = vm.envAddress("L1_SCROLL_MULTISIG_ADDR");
address SECURITY_COUNCIL_ADDR = vm.envAddress("L1_SECURITY_COUNCIL_ADDR");
address L1_PROPOSAL_EXECUTOR_ADDR = vm.envAddress("L1_PROPOSAL_EXECUTOR_ADDR");
function run() external {
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
deployScrollOwner();
if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("sepolia"))) {
// for sepolia
deployTimelockController("1D", 1 minutes);
deployTimelockController("7D", 7 minutes);
deployTimelockController("14D", 14 minutes);
} else if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("mainnet"))) {
// for mainnet
deployTimelockController("1D", 1 days);
deployTimelockController("7D", 7 days);
deployTimelockController("14D", 14 days);
}
vm.stopBroadcast();
}
function deployScrollOwner() internal {
ScrollOwner owner = new ScrollOwner();
logAddress("L1_SCROLL_OWNER_ADDR", address(owner));
}
function deployTimelockController(string memory label, uint256 delay) internal {
address[] memory proposers = new address[](1);
address[] memory executors = new address[](1);
proposers[0] = SCROLL_MULTISIG_ADDR;
executors[0] = L1_PROPOSAL_EXECUTOR_ADDR;
TimelockController timelock = new TimelockController(delay, proposers, executors, SECURITY_COUNCIL_ADDR);
logAddress(string(abi.encodePacked("L1_", label, "_TIMELOCK_ADDR")), address(timelock));
}
function logAddress(string memory name, address addr) internal view {
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
}
}

View File

@@ -15,7 +15,6 @@ import {L2GatewayRouter} from "../../src/L2/gateways/L2GatewayRouter.sol";
import {L2ScrollMessenger} from "../../src/L2/L2ScrollMessenger.sol";
import {L2StandardERC20Gateway} from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
import {L2WETHGateway} from "../../src/L2/gateways/L2WETHGateway.sol";
import {L2DAIGateway} from "../../src/L2/gateways/L2DAIGateway.sol";
import {L1GasPriceOracle} from "../../src/L2/predeploys/L1GasPriceOracle.sol";
import {L2MessageQueue} from "../../src/L2/predeploys/L2MessageQueue.sol";
import {L2TxFeeVault} from "../../src/L2/predeploys/L2TxFeeVault.sol";
@@ -60,7 +59,6 @@ contract DeployL2BridgeContracts is Script {
deployL2CustomERC20Gateway();
deployL2ERC721Gateway();
deployL2ERC1155Gateway();
deployL2DAIGateway();
vm.stopBroadcast();
}
@@ -98,7 +96,7 @@ contract DeployL2BridgeContracts is Script {
}
address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
L2TxFeeVault feeVault = new L2TxFeeVault(address(owner), L1_TX_FEE_RECIPIENT_ADDR);
L2TxFeeVault feeVault = new L2TxFeeVault(address(owner), L1_TX_FEE_RECIPIENT_ADDR, 10 ether);
logAddress("L2_TX_FEE_VAULT_ADDR", address(feeVault));
}
@@ -201,18 +199,6 @@ contract DeployL2BridgeContracts is Script {
logAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR", address(proxy));
}
function deployL2DAIGateway() internal {
L2DAIGateway impl = new L2DAIGateway();
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(
address(impl),
address(proxyAdmin),
new bytes(0)
);
logAddress("L2_DAI_GATEWAY_IMPLEMENTATION_ADDR", address(impl));
logAddress("L2_DAI_GATEWAY_PROXY_ADDR", address(proxy));
}
function deployL2ERC721Gateway() internal {
L2ERC721Gateway impl = new L2ERC721Gateway();
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(

View File

@@ -0,0 +1,66 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.10;
import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";
import {TimelockController} from "@openzeppelin/contracts/governance/TimelockController.sol";
import {ScrollOwner} from "../../src/misc/ScrollOwner.sol";
// solhint-disable state-visibility
// solhint-disable var-name-mixedcase
contract DeployL2ScrollOwner is Script {
string NETWORK = vm.envString("NETWORK");
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
address SCROLL_MULTISIG_ADDR = vm.envAddress("L2_SCROLL_MULTISIG_ADDR");
address SECURITY_COUNCIL_ADDR = vm.envAddress("L2_SECURITY_COUNCIL_ADDR");
address L2_PROPOSAL_EXECUTOR_ADDR = vm.envAddress("L2_PROPOSAL_EXECUTOR_ADDR");
function run() external {
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
deployScrollOwner();
if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("sepolia"))) {
// for sepolia
deployTimelockController("1D", 1 minutes);
deployTimelockController("7D", 7 minutes);
deployTimelockController("14D", 14 minutes);
} else if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("mainnet"))) {
// for mainnet
deployTimelockController("1D", 1 days);
deployTimelockController("7D", 7 days);
deployTimelockController("14D", 14 days);
}
vm.stopBroadcast();
}
function deployScrollOwner() internal {
ScrollOwner owner = new ScrollOwner();
logAddress("L2_SCROLL_OWNER_ADDR", address(owner));
}
function deployTimelockController(string memory label, uint256 delay) internal {
address[] memory proposers = new address[](1);
address[] memory executors = new address[](1);
proposers[0] = SCROLL_MULTISIG_ADDR;
executors[0] = L2_PROPOSAL_EXECUTOR_ADDR;
TimelockController timelock = new TimelockController(delay, proposers, executors, SECURITY_COUNCIL_ADDR);
logAddress(string(abi.encodePacked("L2_", label, "_TIMELOCK_ADDR")), address(timelock));
}
function logAddress(string memory name, address addr) internal view {
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
}
}

View File

@@ -11,7 +11,6 @@ import {L1GatewayRouter} from "../../src/L1/gateways/L1GatewayRouter.sol";
import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
import {L1DAIGateway} from "../../src/L1/gateways/L1DAIGateway.sol";
import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
@@ -22,16 +21,13 @@ contract InitializeL1BridgeContracts is Script {
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
uint256 MAX_TX_IN_CHUNK = vm.envUint("MAX_TX_IN_CHUNK");
uint256 MAX_L1_MESSAGE_GAS_LIMIT = vm.envUint("MAX_L1_MESSAGE_GAS_LIMIT");
address L1_COMMIT_SENDER_ADDRESS = vm.envAddress("L1_COMMIT_SENDER_ADDRESS");
address L1_FINALIZE_SENDER_ADDRESS = vm.envAddress("L1_FINALIZE_SENDER_ADDRESS");
address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
address L1_DAI_ADDR = vm.envAddress("L1_DAI_ADDR");
address L2_DAI_ADDR = vm.envAddress("L2_DAI_ADDR");
address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");
address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");
address L1_MESSAGE_QUEUE_PROXY_ADDR = vm.envAddress("L1_MESSAGE_QUEUE_PROXY_ADDR");
@@ -44,7 +40,6 @@ contract InitializeL1BridgeContracts is Script {
address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
address L1_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L1_DAI_GATEWAY_PROXY_ADDR");
address L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR");
address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");
@@ -56,7 +51,6 @@ contract InitializeL1BridgeContracts is Script {
address L2_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ETH_GATEWAY_PROXY_ADDR");
address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
address L2_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_WETH_GATEWAY_PROXY_ADDR");
address L2_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L2_DAI_GATEWAY_PROXY_ADDR");
address L2_SCROLL_STANDARD_ERC20_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_ADDR");
address L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR");
@@ -67,7 +61,7 @@ contract InitializeL1BridgeContracts is Script {
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).initialize(
L1_MESSAGE_QUEUE_PROXY_ADDR,
L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR,
MAX_L2_TX_IN_CHUNK
MAX_TX_IN_CHUNK
);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addSequencer(L1_COMMIT_SENDER_ADDRESS);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addProver(L1_FINALIZE_SENDER_ADDRESS);
@@ -155,22 +149,12 @@ contract InitializeL1BridgeContracts is Script {
L1_SCROLL_MESSENGER_PROXY_ADDR
);
// initialize L1DAIGateway
L1DAIGateway(L1_DAI_GATEWAY_PROXY_ADDR).initialize(
L2_DAI_GATEWAY_PROXY_ADDR,
L1_GATEWAY_ROUTER_PROXY_ADDR,
L1_SCROLL_MESSENGER_PROXY_ADDR
);
L1DAIGateway(L1_DAI_GATEWAY_PROXY_ADDR).updateTokenMapping(L1_DAI_ADDR, L2_DAI_ADDR);
// set WETH and DAI gateways in router
// set WETH gateway in router
{
address[] memory _tokens = new address[](2);
address[] memory _tokens = new address[](1);
_tokens[0] = L1_WETH_ADDR;
_tokens[1] = L1_DAI_ADDR;
address[] memory _gateways = new address[](2);
address[] memory _gateways = new address[](1);
_gateways[0] = L1_WETH_GATEWAY_PROXY_ADDR;
_gateways[1] = L1_DAI_GATEWAY_PROXY_ADDR;
L1GatewayRouter(L1_GATEWAY_ROUTER_PROXY_ADDR).setERC20Gateway(_tokens, _gateways);
}

View File

@@ -0,0 +1,275 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.10;
import {Script} from "forge-std/Script.sol";
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
import {L1USDCGateway} from "../../src/L1/gateways/usdc/L1USDCGateway.sol";
import {EnforcedTxGateway} from "../../src/L1/gateways/EnforcedTxGateway.sol";
import {L1CustomERC20Gateway} from "../../src/L1/gateways/L1CustomERC20Gateway.sol";
import {L1ERC1155Gateway} from "../../src/L1/gateways/L1ERC1155Gateway.sol";
import {L1ERC721Gateway} from "../../src/L1/gateways/L1ERC721Gateway.sol";
import {L1GatewayRouter} from "../../src/L1/gateways/L1GatewayRouter.sol";
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
import {ScrollMessengerBase} from "../../src/libraries/ScrollMessengerBase.sol";
import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
import {ScrollOwner} from "../../src/misc/ScrollOwner.sol";
import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
// solhint-disable max-states-count
// solhint-disable state-visibility
// solhint-disable var-name-mixedcase
contract InitializeL1ScrollOwner is Script {
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
bytes32 constant SECURITY_COUNCIL_NO_DELAY_ROLE = keccak256("SECURITY_COUNCIL_NO_DELAY_ROLE");
bytes32 constant SCROLL_MULTISIG_NO_DELAY_ROLE = keccak256("SCROLL_MULTISIG_NO_DELAY_ROLE");
bytes32 constant EMERGENCY_MULTISIG_NO_DELAY_ROLE = keccak256("EMERGENCY_MULTISIG_NO_DELAY_ROLE");
bytes32 constant TIMELOCK_1DAY_DELAY_ROLE = keccak256("TIMELOCK_1DAY_DELAY_ROLE");
bytes32 constant TIMELOCK_7DAY_DELAY_ROLE = keccak256("TIMELOCK_7DAY_DELAY_ROLE");
address SCROLL_MULTISIG_ADDR = vm.envAddress("L1_SCROLL_MULTISIG_ADDR");
address SECURITY_COUNCIL_ADDR = vm.envAddress("L1_SECURITY_COUNCIL_ADDR");
address EMERGENCY_MULTISIG_ADDR = vm.envAddress("L1_EMERGENCY_MULTISIG_ADDR");
address L1_SCROLL_OWNER_ADDR = vm.envAddress("L1_SCROLL_OWNER_ADDR");
address L1_1D_TIMELOCK_ADDR = vm.envAddress("L1_1D_TIMELOCK_ADDR");
address L1_7D_TIMELOCK_ADDR = vm.envAddress("L1_7D_TIMELOCK_ADDR");
address L1_14D_TIMELOCK_ADDR = vm.envAddress("L1_14D_TIMELOCK_ADDR");
address L1_PROXY_ADMIN_ADDR = vm.envAddress("L1_PROXY_ADMIN_ADDR");
address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");
address L1_MESSAGE_QUEUE_PROXY_ADDR = vm.envAddress("L1_MESSAGE_QUEUE_PROXY_ADDR");
address L2_GAS_PRICE_ORACLE_PROXY_ADDR = vm.envAddress("L2_GAS_PRICE_ORACLE_PROXY_ADDR");
address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
address L1_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L1_GATEWAY_ROUTER_PROXY_ADDR");
address L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
address L1_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L1_DAI_GATEWAY_PROXY_ADDR");
address L1_LIDO_GATEWAY_PROXY_ADDR = vm.envAddress("L1_LIDO_GATEWAY_PROXY_ADDR");
address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
address L1_USDC_GATEWAY_PROXY_ADDR = vm.envAddress("L1_USDC_GATEWAY_PROXY_ADDR");
address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
address L1_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC721_GATEWAY_PROXY_ADDR");
address L1_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC1155_GATEWAY_PROXY_ADDR");
address L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR");
address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");
address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");
ScrollOwner owner;
function run() external {
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
owner = ScrollOwner(payable(L1_SCROLL_OWNER_ADDR));
// @note we don't configure 14D access, since the default admin is the 14D timelock, which can access all methods.
configProxyAdmin();
configScrollChain();
configL1MessageQueue();
configL1ScrollMessenger();
configL2GasPriceOracle();
configL1Whitelist();
configMultipleVersionRollupVerifier();
configL1GatewayRouter();
configL1CustomERC20Gateway();
configL1ERC721Gateway();
configL1ERC1155Gateway();
configL1USDCGateway();
configEnforcedTxGateway();
grantRoles();
transferOwnership();
vm.stopBroadcast();
}
function transferOwnership() internal {
Ownable(L1_PROXY_ADMIN_ADDR).transferOwnership(address(owner));
Ownable(L1_SCROLL_CHAIN_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_MESSAGE_QUEUE_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_SCROLL_MESSENGER_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L2_GAS_PRICE_ORACLE_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_WHITELIST_ADDR).transferOwnership(address(owner));
Ownable(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR).transferOwnership(address(owner));
Ownable(L1_GATEWAY_ROUTER_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_DAI_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_LIDO_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_ETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_USDC_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_WETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_ERC721_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
Ownable(L1_ERC1155_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
}
function grantRoles() internal {
owner.grantRole(SECURITY_COUNCIL_NO_DELAY_ROLE, SECURITY_COUNCIL_ADDR);
owner.grantRole(SCROLL_MULTISIG_NO_DELAY_ROLE, SCROLL_MULTISIG_ADDR);
owner.grantRole(EMERGENCY_MULTISIG_NO_DELAY_ROLE, EMERGENCY_MULTISIG_ADDR);
owner.grantRole(TIMELOCK_1DAY_DELAY_ROLE, L1_1D_TIMELOCK_ADDR);
owner.grantRole(TIMELOCK_7DAY_DELAY_ROLE, L1_7D_TIMELOCK_ADDR);
owner.grantRole(owner.DEFAULT_ADMIN_ROLE(), L1_14D_TIMELOCK_ADDR);
owner.revokeRole(owner.DEFAULT_ADMIN_ROLE(), vm.addr(L1_DEPLOYER_PRIVATE_KEY));
}
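// Summary of the resulting role assignment, as read from the calls above:
//   SECURITY_COUNCIL_NO_DELAY_ROLE   -> L1_SECURITY_COUNCIL_ADDR
//   SCROLL_MULTISIG_NO_DELAY_ROLE    -> L1_SCROLL_MULTISIG_ADDR
//   EMERGENCY_MULTISIG_NO_DELAY_ROLE -> L1_EMERGENCY_MULTISIG_ADDR
//   TIMELOCK_1DAY_DELAY_ROLE         -> L1_1D_TIMELOCK_ADDR
//   TIMELOCK_7DAY_DELAY_ROLE         -> L1_7D_TIMELOCK_ADDR
//   DEFAULT_ADMIN_ROLE               -> L1_14D_TIMELOCK_ADDR (the deployer's own admin role is revoked last)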
function configProxyAdmin() internal {
bytes4[] memory _selectors;
// no delay, security council
_selectors = new bytes4[](2);
_selectors[0] = ProxyAdmin.upgrade.selector;
_selectors[1] = ProxyAdmin.upgradeAndCall.selector;
owner.updateAccess(L1_PROXY_ADMIN_ADDR, _selectors, SECURITY_COUNCIL_NO_DELAY_ROLE, true);
}
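// The ScrollOwner.updateAccess signature is not shown in this diff; inferred from the calls in this
// script it appears to be updateAccess(address target, bytes4[] selectors, bytes32 role, bool grant),
// where `true` grants the given role permission to call the listed selectors on the target contract.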
function configScrollChain() internal {
bytes4[] memory _selectors;
// no delay, scroll multisig and emergency multisig
_selectors = new bytes4[](4);
_selectors[0] = ScrollChain.revertBatch.selector;
_selectors[1] = ScrollChain.removeSequencer.selector;
_selectors[2] = ScrollChain.removeProver.selector;
_selectors[3] = ScrollChain.setPause.selector;
owner.updateAccess(L1_SCROLL_CHAIN_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
owner.updateAccess(L1_SCROLL_CHAIN_PROXY_ADDR, _selectors, EMERGENCY_MULTISIG_NO_DELAY_ROLE, true);
// delay 1 day, scroll multisig
_selectors = new bytes4[](2);
_selectors[0] = ScrollChain.addSequencer.selector;
_selectors[1] = ScrollChain.addProver.selector;
owner.updateAccess(L1_SCROLL_CHAIN_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
// delay 7 day, scroll multisig
_selectors = new bytes4[](1);
_selectors[0] = ScrollChain.updateMaxNumTxInChunk.selector;
owner.updateAccess(L1_SCROLL_CHAIN_PROXY_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
}
function configL1MessageQueue() internal {
bytes4[] memory _selectors;
// delay 1 day, scroll multisig
_selectors = new bytes4[](2);
_selectors[0] = L1MessageQueue.updateGasOracle.selector;
_selectors[1] = L1MessageQueue.updateMaxGasLimit.selector;
owner.updateAccess(L1_MESSAGE_QUEUE_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
}
function configL1ScrollMessenger() internal {
bytes4[] memory _selectors;
// no delay, scroll multisig and emergency multisig
_selectors = new bytes4[](1);
_selectors[0] = ScrollMessengerBase.setPause.selector;
owner.updateAccess(L1_SCROLL_MESSENGER_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
owner.updateAccess(L1_SCROLL_MESSENGER_PROXY_ADDR, _selectors, EMERGENCY_MULTISIG_NO_DELAY_ROLE, true);
// delay 1 day, scroll multisig
_selectors = new bytes4[](1);
_selectors[0] = L1ScrollMessenger.updateMaxReplayTimes.selector;
owner.updateAccess(L1_SCROLL_MESSENGER_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
}
function configL2GasPriceOracle() internal {
bytes4[] memory _selectors;
// no delay, scroll multisig and emergency multisig
_selectors = new bytes4[](1);
_selectors[0] = L2GasPriceOracle.setIntrinsicParams.selector;
owner.updateAccess(L2_GAS_PRICE_ORACLE_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
owner.updateAccess(L2_GAS_PRICE_ORACLE_PROXY_ADDR, _selectors, EMERGENCY_MULTISIG_NO_DELAY_ROLE, true);
}
function configL1Whitelist() internal {
bytes4[] memory _selectors;
// delay 1 day, scroll multisig
_selectors = new bytes4[](1);
_selectors[0] = Whitelist.updateWhitelistStatus.selector;
owner.updateAccess(L1_WHITELIST_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
}
function configMultipleVersionRollupVerifier() internal {
bytes4[] memory _selectors;
// no delay, security council
_selectors = new bytes4[](1);
_selectors[0] = MultipleVersionRollupVerifier.updateVerifier.selector;
owner.updateAccess(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR, _selectors, SECURITY_COUNCIL_NO_DELAY_ROLE, true);
// delay 7 day, scroll multisig
_selectors = new bytes4[](1);
_selectors[0] = MultipleVersionRollupVerifier.updateVerifier.selector;
owner.updateAccess(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
}
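// Note the dual path above: the security council can call updateVerifier with no delay, while the
// same selector is otherwise reachable only through the 7-day timelock.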
function configL1GatewayRouter() internal {
bytes4[] memory _selectors;
// delay 1 day, scroll multisig
_selectors = new bytes4[](1);
_selectors[0] = L1GatewayRouter.setERC20Gateway.selector;
owner.updateAccess(L1_GATEWAY_ROUTER_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
}
function configL1CustomERC20Gateway() internal {
bytes4[] memory _selectors;
// delay 1 day, scroll multisig
_selectors = new bytes4[](1);
_selectors[0] = L1CustomERC20Gateway.updateTokenMapping.selector;
owner.updateAccess(L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
}
function configL1ERC721Gateway() internal {
bytes4[] memory _selectors;
// delay 1 day, scroll multisig
_selectors = new bytes4[](1);
_selectors[0] = L1ERC721Gateway.updateTokenMapping.selector;
owner.updateAccess(L1_ERC721_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
}
function configL1ERC1155Gateway() internal {
bytes4[] memory _selectors;
// delay 1 day, scroll multisig
_selectors = new bytes4[](1);
_selectors[0] = L1ERC1155Gateway.updateTokenMapping.selector;
owner.updateAccess(L1_ERC1155_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
}
function configL1USDCGateway() internal {
bytes4[] memory _selectors;
// delay 7 day, scroll multisig
_selectors = new bytes4[](3);
_selectors[0] = L1USDCGateway.updateCircleCaller.selector;
_selectors[1] = L1USDCGateway.pauseDeposit.selector;
_selectors[2] = L1USDCGateway.pauseWithdraw.selector;
owner.updateAccess(L1_USDC_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
}
function configEnforcedTxGateway() internal {
bytes4[] memory _selectors;
// no delay, scroll multisig and emergency multisig
_selectors = new bytes4[](1);
_selectors[0] = EnforcedTxGateway.setPause.selector;
owner.updateAccess(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
owner.updateAccess(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR, _selectors, EMERGENCY_MULTISIG_NO_DELAY_ROLE, true);
}
}

Some files were not shown because too many files have changed in this diff.