Compare commits

...

105 Commits

Author SHA1 Message Date
georgehao
d9c1000443 feat: update 2023-09-14 16:17:14 +08:00
georgehao
3f8c8d6a5f feat: update 2023-09-05 15:48:10 +08:00
georgehao
3bc3b4e306 feat: resolve conflict 2023-09-05 15:47:10 +08:00
colin
25b956f9b5 fix(gas-oracle): fetch base fee from the latest L1 block (#920)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-09-05 15:21:06 +08:00
colin
f4663fd249 feat(rollup-relayer): add number of blocks per chunk limit (#880)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-09-05 15:00:16 +08:00
Péter Garamvölgyi
c71fa5a5fc style: use MAX_TX_IN_CHUNK env name in deployment scripts (#921) 2023-09-04 14:35:04 +02:00
Péter Garamvölgyi
4af3834e36 test(fee-vault): add new test testCantWithdrawMoreThanBalance (#918)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-04 08:15:26 +02:00
Haichen Shen
9983585bdd docs: Update readme (#919) 2023-09-04 13:56:43 +08:00
Xi Lin
d288b34536 fix(contracts): OZ-L02 Anyone Can Steal ERC-20 Tokens From GasSwap (#844)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: icemelon <icemelon@users.noreply.github.com>
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
2023-09-04 12:31:41 +08:00
Haichen Shen
a2fe246551 docs(rollup): improve readme (#917)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-09-02 23:23:17 +08:00
Péter Garamvölgyi
8699a22fa3 feat(contracts): allow setting withdraw amount in fee vault (#912)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-09-02 02:23:53 -07:00
Xi Lin
d668180e9a fix(contracts): OZ-M04 Use of Non-Production-Ready Trusted Forwarder (#843)
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: icemelon <icemelon@users.noreply.github.com>
2023-09-02 02:23:31 -07:00
HAOYUatHZ
d3c2e34650 docs(rollup): fix README (#916) 2023-09-02 15:54:26 +08:00
HAOYUatHZ
38551c4eeb refactor: rename bridge to rollup (#644)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-09-02 15:42:01 +08:00
colinlyguo
2e0340460f fix: migration 2023-08-31 16:03:37 +08:00
colin
63d0ccf364 Merge branch 'develop' into feat/one-task-multi-prover 2023-08-31 16:02:06 +08:00
georgehao
4e3a2a4745 chore: auto version bump [bot] 2023-08-29 08:24:05 +00:00
georgehao
6e880302d3 feat: add proved_at 2023-08-29 16:23:37 +08:00
georgehao
2cb64e0c17 feat: update 2023-08-28 19:54:14 +08:00
georgehao
686cb00a4c feat: update 2023-08-28 19:48:40 +08:00
georgehao
af22052a9c Merge branch 'feat/one-task-multi-prover' of github.com:scroll-tech/scroll into feat/one-task-multi-prover 2023-08-28 19:48:13 +08:00
georgehao
2a542620a1 feat: update 2023-08-28 19:48:02 +08:00
georgehao
e33b9defc6 chore: auto version bump [bot] 2023-08-28 11:44:44 +00:00
georgehao
0349993beb feat: update 2023-08-28 19:44:21 +08:00
georgehao
3a8dce6041 feat: update 2023-08-28 19:43:40 +08:00
georgehao
575ab62b5d chore: auto version bump [bot] 2023-08-28 07:20:45 +00:00
georgehao
b2b65cdb40 xx 2023-08-28 15:20:05 +08:00
georgehao
e5488afd0e feat: update 2023-08-28 15:17:30 +08:00
georgehao
4cc47481f2 chore: auto version bump [bot] 2023-08-28 07:13:19 +00:00
georgehao
69c8e40718 feat: fix conflict 2023-08-28 15:12:51 +08:00
georgehao
24dc27e7b5 feat: updagte 2023-08-25 17:57:18 +08:00
georgehao
4dd11c6475 Merge branch 'feat/one-task-multi-prover' of github.com:scroll-tech/scroll into feat/one-task-multi-prover 2023-08-25 16:46:55 +08:00
georgehao
947be434c3 Merge branch 'develop' of github.com:scroll-tech/scroll into feat/one-task-multi-prover 2023-08-25 16:46:33 +08:00
georgehao
80c011a72c chore: auto version bump [bot] 2023-08-25 08:45:10 +00:00
georgehao
841dd4354c feat: update 2023-08-25 16:44:42 +08:00
georgehao
2441cc7119 Merge branch 'feat/one-task-multi-prover' of github.com:scroll-tech/scroll into feat/one-task-multi-prover 2023-08-25 16:03:13 +08:00
georgehao
c04ffc5726 Merge branch 'develop' of github.com:scroll-tech/scroll into feat/one-task-multi-prover 2023-08-25 16:02:52 +08:00
georgehao
e72627c171 chore: auto version bump [bot] 2023-08-25 08:02:31 +00:00
georgehao
290195d927 Merge branch 'feat/one-task-multi-prover' of github.com:scroll-tech/scroll into feat/one-task-multi-prover 2023-08-25 16:02:04 +08:00
georgehao
606fdba388 feat: update 2023-08-25 15:58:13 +08:00
georgehao
3a8fbee505 chore: auto version bump [bot] 2023-08-25 07:35:28 +00:00
georgehao
a16bcd090a feat: updage 2023-08-25 15:35:06 +08:00
georgehao
670779337f feat: fix 2023-08-25 15:14:20 +08:00
georgehao
3a924f6463 feat: resolve conflict 2023-08-25 11:58:00 +08:00
georgehao
7bd9d2748f chore: auto version bump [bot] 2023-08-25 02:57:20 +00:00
georgehao
753d617cc8 feat: conflict 2023-08-25 10:56:56 +08:00
georgehao
60f8046f9a feat: golint 2023-08-25 10:47:52 +08:00
georgehao
003205a954 feat: udpate 2023-08-24 20:46:08 +08:00
georgehao
0c27c64ade feat: one task assign multiple prover 2023-08-24 20:44:54 +08:00
georgehao
640a01dff7 chore: auto version bump [bot] 2023-08-24 07:41:07 +00:00
georgehao
35a52fc38f feat: fix conflic 2023-08-24 15:40:38 +08:00
georgehao
382fad507a feat: update 2023-08-23 20:07:23 +08:00
georgehao
f3bd1349a2 Merge branch 'feat/prover_task_unique' of github.com:scroll-tech/scroll into feat/prover_task_unique 2023-08-23 20:02:37 +08:00
georgehao
42cb40745e feat: update 2023-08-23 20:02:22 +08:00
georgehao
8649d23ec1 chore: auto version bump [bot] 2023-08-23 09:29:08 +00:00
georgehao
7255f8098c feat: updatge 2023-08-23 17:28:37 +08:00
georgehao
cee5d37caa chore: auto version bump [bot] 2023-08-23 09:27:24 +00:00
georgehao
4c738d759c feat: update 2023-08-23 17:26:51 +08:00
georgehao
8a70fc8bf3 chore: auto version bump [bot] 2023-08-23 09:25:33 +00:00
georgehao
b6afe29307 Merge branch 'feat/prover_task_unique' of github.com:scroll-tech/scroll into feat/prover_task_unique 2023-08-23 17:24:59 +08:00
georgehao
f33ec93eb6 feat: update 2023-08-23 17:24:35 +08:00
georgehao
d64d646e43 chore: auto version bump [bot] 2023-08-23 09:09:07 +00:00
georgehao
3f678a0f9b Update coordinator/internal/logic/provertask/chunk_prover_task.go
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-08-23 17:08:45 +08:00
georgehao
5f7de85912 chore: auto version bump [bot] 2023-08-23 04:03:52 +00:00
georgehao
7dcfa17e7c Merge branch 'develop' into feat/prover_task_unique 2023-08-23 12:03:25 +08:00
georgehao
4b3a58aaa5 chore: auto version bump [bot] 2023-08-23 04:01:04 +00:00
georgehao
84bdae3a01 Merge branch 'develop' into feat/prover_task_unique 2023-08-23 12:00:39 +08:00
georgehao
404e44297d chore: auto version bump [bot] 2023-08-23 03:12:41 +00:00
georgehao
93510798d1 feat: update 2023-08-23 11:12:07 +08:00
georgehao
e4e3ca6851 Merge branch 'feat/prover_task_unique' of github.com:scroll-tech/scroll into feat/prover_task_unique 2023-08-23 11:10:50 +08:00
georgehao
2efb82d58e Merge branch 'develop' of github.com:scroll-tech/scroll into feat/prover_task_unique 2023-08-23 11:10:23 +08:00
georgehao
8499b56092 chore: auto version bump [bot] 2023-08-23 03:09:43 +00:00
georgehao
519fc61151 Merge branch 'feat/prover_task_unique' of github.com:scroll-tech/scroll into feat/prover_task_unique 2023-08-23 11:08:59 +08:00
georgehao
c5f2be41be feat: update 2023-08-23 11:08:38 +08:00
georgehao
5a3b91147a chore: auto version bump [bot] 2023-08-22 12:28:38 +00:00
georgehao
7b721a7397 feat: fix conflict 2023-08-22 20:28:09 +08:00
georgehao
e523d61d1a feat: update 2023-08-22 20:27:28 +08:00
georgehao
510a519069 feat: add migrate sql 2023-08-22 16:50:47 +08:00
georgehao
808c68b4be feat: update 2023-08-22 16:27:01 +08:00
georgehao
d0d4b6e4f3 feat: update 2023-08-22 15:49:46 +08:00
georgehao
8c05add1b2 feat: remove debug code 2023-08-22 15:46:08 +08:00
georgehao
edb75c8cb3 Merge branch 'feat/prover_task_unique' of github.com:scroll-tech/scroll into feat/prover_task_unique 2023-08-22 15:43:05 +08:00
georgehao
98135e04e1 feat: update 2023-08-22 15:42:52 +08:00
georgehao
a40e7d1534 chore: auto version bump [bot] 2023-08-22 07:41:49 +00:00
georgehao
2e858a38be feat: fix conflict 2023-08-22 15:41:22 +08:00
georgehao
95e146ab1f feat: update 2023-08-22 15:37:09 +08:00
georgehao
0676f23c02 Merge branch 'feat/prover_task_unique' of github.com:scroll-tech/scroll into feat/prover_task_unique 2023-08-22 15:36:43 +08:00
georgehao
59d9bb25af feat: update uuid generate style 2023-08-22 15:36:31 +08:00
georgehao
965fbcff65 chore: auto version bump [bot] 2023-08-22 04:15:59 +00:00
georgehao
b32b023c50 Merge branch 'develop' into feat/prover_task_unique 2023-08-22 12:15:36 +08:00
georgehao
3df2e0267a chore: auto version bump [bot] 2023-08-22 01:36:52 +00:00
georgehao
ee0351907b feat: update 2023-08-22 09:36:22 +08:00
georgehao
77a3de1646 feat: update 2023-08-21 19:33:51 +08:00
georgehao
49c6e7ded7 feat: update 2023-08-21 19:31:43 +08:00
georgehao
3e8e08dccc trigger ci 2023-08-21 18:47:34 +08:00
georgehao
7bb047aadc feat: update 2023-08-21 18:14:38 +08:00
georgehao
aa2e3dc996 feat: update 2023-08-21 18:11:23 +08:00
georgehao
ab3de62357 feat: update 2023-08-21 18:08:04 +08:00
georgehao
5607d1846f feat: prover get/submit uuid 2023-08-21 18:07:10 +08:00
georgehao
e7bf8b079d feat: update 2023-08-21 17:57:11 +08:00
georgehao
0e12352be3 feat: update 2023-08-21 17:52:01 +08:00
georgehao
e16d50d912 chore: auto version bump [bot] 2023-08-21 09:48:35 +00:00
georgehao
04fe23f95c Merge branch 'develop' into feat/prover_task_unique 2023-08-21 17:48:29 +08:00
georgehao
0a5465a750 feat: update 2023-08-21 17:47:00 +08:00
georgehao
d2f2dae3de feat: make the prover_task to unique 2023-08-21 17:42:51 +08:00
114 changed files with 1526 additions and 893 deletions

View File

@@ -36,7 +36,7 @@ jobs:
- name: Build prerequisites
run: |
make dev_docker
make -C bridge mock_abi
make -C rollup mock_abi
make -C common/bytecode all
- name: Run integration tests
run: |

View File

@@ -7,22 +7,12 @@ on:
- staging
- develop
- alpha
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
defaults:
run:
working-directory: 'prover-stats-api'
jobs:
check:

View File

@@ -1,4 +1,4 @@
name: Bridge
name: Rollup
on:
push:
@@ -8,11 +8,11 @@ on:
- develop
- alpha
paths:
- 'bridge/**'
- 'rollup/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/bridge.yml'
- '.github/workflows/rollup.yml'
pull_request:
types:
- opened
@@ -20,11 +20,11 @@ on:
- synchronize
- ready_for_review
paths:
- 'bridge/**'
- 'rollup/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/bridge.yml'
- '.github/workflows/rollup.yml'
jobs:
check:
@@ -46,7 +46,7 @@ jobs:
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
working-directory: 'bridge'
working-directory: 'rollup'
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
@@ -64,14 +64,14 @@ jobs:
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
run: goimports -local scroll-tech/bridge/ -w .
working-directory: 'bridge'
run: goimports -local scroll-tech/rollup/ -w .
working-directory: 'rollup'
- name: Run go mod tidy
run: go mod tidy
working-directory: 'bridge'
working-directory: 'rollup'
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'bridge'
working-directory: 'rollup'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
@@ -97,13 +97,13 @@ jobs:
- name: Build prerequisites
run: |
make dev_docker
make -C bridge mock_abi
- name: Build bridge binaries
working-directory: 'bridge'
make -C rollup mock_abi
- name: Build rollup binaries
working-directory: 'rollup'
run: |
make bridge_bins
- name: Test bridge packages
working-directory: 'bridge'
make rollup_bins
- name: Test rollup packages
working-directory: 'rollup'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
@@ -111,7 +111,7 @@ jobs:
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: bridge
flags: rollup
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest

.gitignore
View File

@@ -1,9 +1,22 @@
.idea
# Asset files
assets/params*
assets/seed
coverage.txt
# Built binaries
build/bin
coverage.txt
*.integration.txt
# Visual Studio Code
.vscode
# IntelliJ
.idea
# MacOS
.DS_Store
# misc
sftp-config.json
*~

View File

@@ -8,7 +8,7 @@ help: ## Display this help message
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
lint: ## The code's format and security checks.
make -C bridge lint
make -C rollup lint
make -C common lint
make -C coordinator lint
make -C database lint
@@ -17,7 +17,7 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
@@ -25,7 +25,7 @@ update: ## update dependencies
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .

View File

@@ -1,7 +1,30 @@
# Scroll Monorepo
[![rollup](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
[![contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml)
[![bridge-history](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
[![coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
[![prover](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
[![integration](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml)
[![codecov](https://codecov.io/gh/scroll-tech/scroll/branch/develop/graph/badge.svg?token=VJVHNQWGGW)](https://codecov.io/gh/scroll-tech/scroll)
<a href="https://scroll.io">Scroll</a> is a zkRollup Layer 2 dedicated to enhance Ethereum scalability through a bytecode-equivalent [zkEVM](https://github.com/scroll-tech/zkevm-circuits) circuit. This monorepo encompasses essential infrastructure components of the Scroll protocol. It contains the L1 and L2 contracts, the rollup node, the prover client, and the prover coordinator.
## Directory Structure
<pre>
├── <a href="./bridge-history-api/">bridge-history-api</a>: Bridge history service that collects deposit and withdraw events from both L1 and L2 chain and generates withdrawal proofs
├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coorindator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./src">l2geth</a>: Scroll execution node
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./prover-stats-api">prover-stats-api</a>: Collect and show prover statistics
├── <a href="./prover-stats-api">rollup</a>: Rollup-related services
├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
└── <a href="./tests">tests</a>: Integration tests
</pre>
## Prerequisites
+ Go 1.19
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
@@ -15,14 +38,14 @@ docker pull postgres
make dev_docker
```
## Testing Bridge & Coordinator
## Testing Rollup & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -55,7 +78,7 @@ This command runs a Docker container named `scroll_test_container` from the `scr
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -63,6 +86,10 @@ go test -v -race -covermode=atomic scroll-tech/common/...
## Testing Contracts
You can find the unit tests in [`<REPO_DIR>/contracts/src/test/`](/contracts/src/test/), and integration tests in [`<REPO_DIR>/contracts/integration-test/`](/contracts/integration-test/).
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).
For more details on contracts, see [`/contracts`](/contracts).
See [`contracts`](/contracts) for more details on the contracts.
## License
Scroll Monorepo is licensed under the [MIT](./LICENSE) license.

View File

@@ -1,39 +0,0 @@
# Bridge
This repo contains the Scroll bridge.
## Dependency
+ install `abigen`
``` bash
go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
```
## Build
```bash
make clean
make mock_abi
make bridge_bins
```
## Start
(Note: make sure you use different private keys for different senders in config.json.)
* use default ports and config.json.
```bash
./build/bin/event_watcher --http
./build/bin/gas_oracle --http
./build/bin/rollup_relayer --http
```
* use specified ports and config.json
```bash
./build/bin/event_watcher --config ./config.json --http --http.addr localhost --http.port 8290
./build/bin/gas_oracle --config ./config.json --http --http.addr localhost --http.port 8290
./build/bin/rollup_relayer --config ./config.json --http --http.addr localhost --http.port 8290
```

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/event_watcher/app"
func main() {
app.Run()
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/gas_oracle/app"
func main() {
app.Run()
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge/cmd/rollup_relayer/app"
func main() {
app.Run()
}

View File

@@ -1,54 +0,0 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l1RelayerMetrics struct {
bridgeL1RelayedMsgsTotal prometheus.Counter
bridgeL1RelayedMsgsFailureTotal prometheus.Counter
bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL1RelayerLastGasPrice prometheus.Gauge
bridgeL1MsgsRelayedConfirmedTotal prometheus.Counter
bridgeL1GasOraclerConfirmedTotal prometheus.Counter
}
var (
initL1RelayerMetricOnce sync.Once
l1RelayerMetric *l1RelayerMetrics
)
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
initL1RelayerMetricOnce.Do(func() {
l1RelayerMetric = &l1RelayerMetrics{
bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_total",
Help: "The total number of the l1 relayed message.",
}),
bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_failure_total",
Help: "The total number of the l1 relayed failure message.",
}),
bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_relayed_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_price_oracler_total",
Help: "The total number of layer1 gas price oracler run total",
}),
bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer1_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l1",
}),
bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_oracler_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
}
})
return l1RelayerMetric
}

View File

@@ -1,84 +0,0 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l2RelayerMetrics struct {
bridgeL2RelayerProcessPendingBatchTotal prometheus.Counter
bridgeL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
bridgeL2RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL2RelayerLastGasPrice prometheus.Gauge
bridgeL2RelayerProcessCommittedBatchesTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
bridgeL2ChainMonitorLatestFailedCall prometheus.Counter
bridgeL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
}
var (
initL2RelayerMetricOnce sync.Once
l2RelayerMetric *l2RelayerMetrics
)
func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
initL2RelayerMetricOnce.Do(func() {
l2RelayerMetric = &l2RelayerMetrics{
bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_total",
Help: "The total number of layer2 process pending batch",
}),
bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_success_total",
Help: "The total number of layer2 process pending success batch",
}),
bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_gas_price_oracler_total",
Help: "The total number of layer2 gas price oracler run total",
}),
bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer2_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l2",
}),
bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_total",
Help: "The total number of layer2 process committed batches run total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_total",
Help: "The total number of layer2 process committed batches finalized total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_success_total",
Help: "The total number of layer2 process committed batches finalized success total",
}),
bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_confirmed_total",
Help: "The total number of layer2 process committed batches confirmed total",
}),
bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_finalized_batches_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
bridgeL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_chain_monitor_latest_failed_batch_call",
Help: "The total number of failed call chain_monitor api",
}),
bridgeL2ChainMonitorLatestFailedBatchStatus: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_chain_monitor_latest_failed_batch_status",
Help: "The total number of failed batch status get from chain_monitor",
}),
}
})
return l2RelayerMetric
}

View File

@@ -20,7 +20,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
FROM scrolltech/go-rust-builder:go-1.19-rust-nightly-2022-12-10 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/

View File

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/

View File

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
@@ -18,7 +18,7 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
# Pull event_watcher into a second stage deploy alpine container
FROM alpine:latest

View File

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
@@ -18,7 +18,7 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
# Pull gas_oracle into a second stage deploy alpine container
FROM alpine:latest

View File

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/

View File

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
@@ -18,7 +18,7 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy alpine container
FROM alpine:latest

View File

@@ -3,7 +3,7 @@ set -uex
profile_name=$1
exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")
exclude_dirs=("scroll-tech/rollup/cmd" "scroll-tech/rollup/tests" "scroll-tech/rollup/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")
all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"

View File

@@ -6,7 +6,7 @@ flag_management:
default_rules:
carryforward: true
individual_flags:
- name: bridge
- name: rollup
statuses:
- type: project
target: auto

View File

@@ -103,6 +103,12 @@ const (
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
// ProverTaskFailureTypeTimeout prover task failure of timeout
ProverTaskFailureTypeTimeout
// ProverTaskFailureTypeSubmitStatusNotOk prover task failure: proof submission rejected by the coordinator's validation
ProverTaskFailureTypeSubmitStatusNotOk
// ProverTaskFailureTypeVerifiedFailed prover task failure: proof verification failed on the coordinator
ProverTaskFailureTypeVerifiedFailed
// ProverTaskFailureTypeServerError prover task failure: server-side error occurred while collecting the proof
ProverTaskFailureTypeServerError
)
func (r ProverTaskFailureType) String() string {
@@ -111,8 +117,14 @@ func (r ProverTaskFailureType) String() string {
return "prover task failure undefined"
case ProverTaskFailureTypeTimeout:
return "prover task failure timeout"
case ProverTaskFailureTypeSubmitStatusNotOk:
return "prover task failure validated submit proof status not ok"
case ProverTaskFailureTypeVerifiedFailed:
return "prover task failure verified failed"
case ProverTaskFailureTypeServerError:
return "prover task failure server exception"
default:
return "illegal prover task failure type"
return fmt.Sprintf("illegal prover task failure type (%d)", int32(r))
}
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.2.16"
var tag = "v4.2.21"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -1,7 +1,7 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { ethers } from "hardhat";
import { GasSwap, MinimalForwarder, MockERC20, MockGasSwapTarget } from "../typechain";
import { GasSwap, ERC2771Forwarder, MockERC20, MockGasSwapTarget } from "../typechain";
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
import { expect } from "chai";
import { BigNumber, constants } from "ethers";
@@ -11,7 +11,7 @@ describe("GasSwap.spec", async () => {
let deployer: SignerWithAddress;
let signer: SignerWithAddress;
let forwarder: MinimalForwarder;
let forwarder: ERC2771Forwarder;
let swap: GasSwap;
let target: MockGasSwapTarget;
let token: MockERC20;
@@ -19,8 +19,8 @@ describe("GasSwap.spec", async () => {
beforeEach(async () => {
[deployer, signer] = await ethers.getSigners();
const MinimalForwarder = await ethers.getContractFactory("MinimalForwarder", deployer);
forwarder = await MinimalForwarder.deploy();
const ERC2771Forwarder = await ethers.getContractFactory("ERC2771Forwarder", deployer);
forwarder = await ERC2771Forwarder.deploy("ERC2771Forwarder");
await forwarder.deployed();
const GasSwap = await ethers.getContractFactory("GasSwap", deployer);
@@ -253,12 +253,13 @@ describe("GasSwap.spec", async () => {
await swap.updateFeeRatio(ethers.utils.parseEther(feeRatio).div(100));
const fee = amountOut.mul(feeRatio).div(100);
const req = {
const reqWithoutSignature = {
from: signer.address,
to: swap.address,
value: constants.Zero,
gas: 1000000,
nonce: 0,
nonce: await forwarder.nonces(signer.address),
deadline: 2000000000,
data: swap.interface.encodeFunctionData("swap", [
{
token: token.address,
@@ -278,8 +279,8 @@ describe("GasSwap.spec", async () => {
const signature = await signer._signTypedData(
{
name: "MinimalForwarder",
version: "0.0.1",
name: "ERC2771Forwarder",
version: "1",
chainId: (await ethers.provider.getNetwork()).chainId,
verifyingContract: forwarder.address,
},
@@ -305,17 +306,29 @@ describe("GasSwap.spec", async () => {
name: "nonce",
type: "uint256",
},
{
name: "deadline",
type: "uint48",
},
{
name: "data",
type: "bytes",
},
],
},
req
reqWithoutSignature
);
const balanceBefore = await signer.getBalance();
await forwarder.execute(req, signature);
await forwarder.execute({
from: reqWithoutSignature.from,
to: reqWithoutSignature.to,
value: reqWithoutSignature.value,
gas: reqWithoutSignature.gas,
deadline: reqWithoutSignature.deadline,
data: reqWithoutSignature.data,
signature,
});
const balanceAfter = await signer.getBalance();
expect(balanceAfter.sub(balanceBefore)).to.eq(amountOut.sub(fee));
expect(await token.balanceOf(signer.address)).to.eq(amountIn.mul(refundRatio).div(100));

View File

@@ -22,7 +22,7 @@ contract InitializeL1BridgeContracts is Script {
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
uint256 MAX_TX_IN_CHUNK = vm.envUint("MAX_TX_IN_CHUNK");
uint256 MAX_L1_MESSAGE_GAS_LIMIT = vm.envUint("MAX_L1_MESSAGE_GAS_LIMIT");
address L1_COMMIT_SENDER_ADDRESS = vm.envAddress("L1_COMMIT_SENDER_ADDRESS");
address L1_FINALIZE_SENDER_ADDRESS = vm.envAddress("L1_FINALIZE_SENDER_ADDRESS");
@@ -67,7 +67,7 @@ contract InitializeL1BridgeContracts is Script {
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).initialize(
L1_MESSAGE_QUEUE_PROXY_ADDR,
L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR,
MAX_L2_TX_IN_CHUNK
MAX_TX_IN_CHUNK
);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addSequencer(L1_COMMIT_SENDER_ADDRESS);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addProver(L1_FINALIZE_SENDER_ADDRESS);

View File

@@ -104,6 +104,9 @@ contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
_permit.s
);
// record token balance in this contract
uint256 _balance = IERC20(_permit.token).balanceOf(address(this));
// transfer token
IERC20(_permit.token).safeTransferFrom(_sender, address(this), _permit.value);
@@ -128,7 +131,7 @@ contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
require(_success, "transfer ETH failed");
// refund rest token
uint256 _dust = IERC20(_permit.token).balanceOf(address(this));
uint256 _dust = IERC20(_permit.token).balanceOf(address(this)) - _balance;
if (_dust > 0) {
IERC20(_permit.token).safeTransfer(_sender, _dust);
}
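The fix above snapshots the contract's token balance before pulling funds and refunds only the delta, so tokens already sitting in GasSwap can no longer be swept by an arbitrary caller. A minimal hedged sketch of that balance-delta pattern, using hypothetical names (BalanceDeltaRefundSketch, _swapAndRefund) rather than the actual GasSwap layout:

// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;

import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";

// Hedged sketch of the balance-delta refund pattern; not the actual GasSwap code.
contract BalanceDeltaRefundSketch {
    using SafeERC20 for IERC20;

    function _swapAndRefund(IERC20 token, address sender, uint256 amount) internal {
        // Snapshot before pulling funds so tokens that were already in this
        // contract are excluded from the refund below.
        uint256 balanceBefore = token.balanceOf(address(this));
        token.safeTransferFrom(sender, address(this), amount);

        // ... perform the swap with the pulled tokens ...

        // Refund only the leftover from this swap, never the whole balance.
        uint256 dust = token.balanceOf(address(this)) - balanceBefore;
        if (dust > 0) {
            token.safeTransfer(sender, dust);
        }
    }
}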

View File

@@ -103,29 +103,34 @@ abstract contract FeeVault is OwnableBase {
receive() external payable {}
/// @notice Triggers a withdrawal of funds to the L1 fee wallet.
function withdraw() external {
uint256 value = address(this).balance;
/// @param _value The amount of ETH to withdraw.
function withdraw(uint256 _value) public {
require(
value >= minWithdrawAmount,
_value >= minWithdrawAmount,
"FeeVault: withdrawal amount must be greater than minimum withdrawal amount"
);
unchecked {
totalProcessed += value;
totalProcessed += _value;
}
emit Withdrawal(value, recipient, msg.sender);
emit Withdrawal(_value, recipient, msg.sender);
// no fee provided
IL2ScrollMessenger(messenger).sendMessage{value: value}(
IL2ScrollMessenger(messenger).sendMessage{value: _value}(
recipient,
value,
_value,
bytes(""), // no message (simple eth transfer)
0 // _gasLimit can be zero for fee vault.
);
}
/// @notice Triggers a withdrawal of all available funds to the L1 fee wallet.
function withdraw() external {
uint256 value = address(this).balance;
withdraw(value);
}
/************************
* Restricted Functions *
************************/

View File

@@ -0,0 +1,391 @@
// SPDX-License-Identifier: MIT
// @note This file is directly copied from OpenZeppelin's master branch:
// https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/metatx/ERC2771Forwarder.sol
// Modifications are made to make it compatible with solidity 0.8.16.
pragma solidity =0.8.16;
import {ERC2771Context} from "@openzeppelin/contracts/metatx/ERC2771Context.sol";
import {ECDSA} from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol";
import {EIP712} from "@openzeppelin/contracts/utils/cryptography/EIP712.sol";
import {Nonces} from "./Nonces.sol";
import {Address} from "@openzeppelin/contracts/utils/Address.sol";
/**
* @dev A forwarder compatible with ERC2771 contracts. See {ERC2771Context}.
*
* This forwarder operates on forward requests that include:
*
* * `from`: An address to operate on behalf of. It is required to be equal to the request signer.
* * `to`: The address that should be called.
* * `value`: The amount of native token to attach with the requested call.
* * `gas`: The amount of gas limit that will be forwarded with the requested call.
* * `nonce`: A unique transaction ordering identifier to avoid replayability and request invalidation.
* * `deadline`: A timestamp after which the request is not executable anymore.
* * `data`: Encoded `msg.data` to send with the requested call.
*
* Relayers are able to submit batches if they are processing a high volume of requests. With high
* throughput, relayers may run into limitations of the chain such as limits on the number of
* transactions in the mempool. In these cases the recommendation is to distribute the load among
* multiple accounts.
*
* NOTE: Batching requests includes an optional refund for unused `msg.value` that is achieved by
* performing a call with empty calldata. While this is within the bounds of ERC-2771 compliance,
* if the refund receiver happens to consider the forwarder a trusted forwarder, it MUST properly
* handle `msg.data.length == 0`. `ERC2771Context` in OpenZeppelin Contracts versions prior to 4.9.3
* do not handle this properly.
*
* ==== Security Considerations
*
* If a relayer submits a forward request, it should be willing to pay up to 100% of the gas amount
* specified in the request. This contract does not implement any kind of retribution for this gas,
* and it is assumed that there is an out of band incentive for relayers to pay for execution on
* behalf of signers. Often, the relayer is operated by a project that will consider it a user
* acquisition cost.
*
* By offering to pay for gas, relayers are at risk of having that gas used by an attacker toward
* some other purpose that is not aligned with the expected out of band incentives. If you operate a
* relayer, consider whitelisting target contracts and function selectors. When relaying ERC-721 or
* ERC-1155 transfers specifically, consider rejecting the use of the `data` field, since it can be
* used to execute arbitrary code.
*/
contract ERC2771Forwarder is EIP712, Nonces {
using ECDSA for bytes32;
struct ForwardRequestData {
address from;
address to;
uint256 value;
uint256 gas;
uint48 deadline;
bytes data;
bytes signature;
}
bytes32 internal constant _FORWARD_REQUEST_TYPEHASH =
keccak256(
"ForwardRequest(address from,address to,uint256 value,uint256 gas,uint256 nonce,uint48 deadline,bytes data)"
);
/**
* @dev Emitted when a `ForwardRequest` is executed.
*
* NOTE: An unsuccessful forward request could be due to an invalid signature, an expired deadline,
* or simply a revert in the requested call. The contract guarantees that the relayer is not able to force
* the requested call to run out of gas.
*/
event ExecutedForwardRequest(address indexed signer, uint256 nonce, bool success);
/**
* @dev The request `from` doesn't match with the recovered `signer`.
*/
error ERC2771ForwarderInvalidSigner(address signer, address from);
/**
* @dev The `requestedValue` doesn't match with the available `msgValue`.
*/
error ERC2771ForwarderMismatchedValue(uint256 requestedValue, uint256 msgValue);
/**
* @dev The request `deadline` has expired.
*/
error ERC2771ForwarderExpiredRequest(uint48 deadline);
/**
* @dev The request target doesn't trust the `forwarder`.
*/
error ERC2771UntrustfulTarget(address target, address forwarder);
/**
* @dev A call to an address target failed. The target may have reverted.
*/
error FailedInnerCall();
/**
* @dev See {EIP712-constructor}.
*/
constructor(string memory name) EIP712(name, "1") {}
/**
* @dev Returns `true` if a request is valid for a provided `signature` at the current block timestamp.
*
* A transaction is considered valid when the target trusts this forwarder, the request hasn't expired
* (deadline is not met), and the signer matches the `from` parameter of the signed request.
*
* NOTE: A request may return false here but it won't cause {executeBatch} to revert if a refund
* receiver is provided.
*/
function verify(ForwardRequestData calldata request) public view virtual returns (bool) {
(bool isTrustedForwarder, bool active, bool signerMatch, ) = _validate(request);
return isTrustedForwarder && active && signerMatch;
}
/**
* @dev Executes a `request` on behalf of `signature`'s signer using the ERC-2771 protocol. The gas
* provided to the requested call may not be exactly the amount requested, but the call will not run
* out of gas. Will revert if the request is invalid or the call reverts, in this case the nonce is not consumed.
*
* Requirements:
*
* - The request value should be equal to the provided `msg.value`.
* - The request should be valid according to {verify}.
*/
function execute(ForwardRequestData calldata request) public payable virtual {
// We make sure that msg.value and request.value match exactly.
// If the request is invalid or the call reverts, this whole function
// will revert, ensuring value isn't stuck.
if (msg.value != request.value) {
revert ERC2771ForwarderMismatchedValue(request.value, msg.value);
}
if (!_execute(request, true)) {
revert FailedInnerCall();
}
}
/**
* @dev Batch version of {execute} with optional refunding and atomic execution.
*
* In case a batch contains at least one invalid request (see {verify}), the
* request will be skipped and the `refundReceiver` parameter will receive back the
* unused requested value at the end of the execution. This is done to prevent reverting
* the entire batch when a request is invalid or has already been submitted.
*
* If the `refundReceiver` is the `address(0)`, this function will revert when at least
* one of the requests was not valid instead of skipping it. This could be useful if
* a batch is required to get executed atomically (at least at the top-level). For example,
* refunding (and thus atomicity) can be opt-out if the relayer is using a service that avoids
* including reverted transactions.
*
* Requirements:
*
* - The sum of the requests' values should be equal to the provided `msg.value`.
* - All of the requests should be valid (see {verify}) when `refundReceiver` is the zero address.
*
* NOTE: Setting a zero `refundReceiver` guarantees an all-or-nothing requests execution only for
* the first-level forwarded calls. In case a forwarded request calls to a contract with another
* subcall, the second-level call may revert without the top-level call reverting.
*/
function executeBatch(ForwardRequestData[] calldata requests, address payable refundReceiver)
public
payable
virtual
{
bool atomic = refundReceiver == address(0);
uint256 requestsValue;
uint256 refundValue;
for (uint256 i; i < requests.length; ++i) {
requestsValue += requests[i].value;
bool success = _execute(requests[i], atomic);
if (!success) {
refundValue += requests[i].value;
}
}
// The batch should revert if there's a mismatched msg.value provided
// to avoid request value tampering
if (requestsValue != msg.value) {
revert ERC2771ForwarderMismatchedValue(requestsValue, msg.value);
}
// Some requests with value were invalid (possibly due to frontrunning).
// To avoid leaving ETH in the contract this value is refunded.
if (refundValue != 0) {
// We know refundReceiver != address(0) && requestsValue == msg.value
// meaning we can ensure refundValue is not taken from the original contract's balance
// and refundReceiver is a known account.
Address.sendValue(refundReceiver, refundValue);
}
}
/**
* @dev Validates if the provided request can be executed at current block timestamp with
* the given `request.signature` on behalf of `request.signer`.
*/
function _validate(ForwardRequestData calldata request)
internal
view
virtual
returns (
bool isTrustedForwarder,
bool active,
bool signerMatch,
address signer
)
{
(bool isValid, address recovered) = _recoverForwardRequestSigner(request);
return (
_isTrustedByTarget(request.to),
request.deadline >= block.timestamp,
isValid && recovered == request.from,
recovered
);
}
/**
* @dev Returns a tuple with the recovered signer of an EIP712 forward request message hash
* and a boolean indicating if the signature is valid.
*
* NOTE: The signature is considered valid if {ECDSA-tryRecover} indicates no recover error for it.
*/
function _recoverForwardRequestSigner(ForwardRequestData calldata request)
internal
view
virtual
returns (bool, address)
{
(address recovered, ECDSA.RecoverError err) = _hashTypedDataV4(
keccak256(
abi.encode(
_FORWARD_REQUEST_TYPEHASH,
request.from,
request.to,
request.value,
request.gas,
nonces(request.from),
request.deadline,
keccak256(request.data)
)
)
).tryRecover(request.signature);
return (err == ECDSA.RecoverError.NoError, recovered);
}
/**
* @dev Validates and executes a signed request returning the request call `success` value.
*
* Internal function without msg.value validation.
*
* Requirements:
*
* - The caller must have provided enough gas to forward with the call.
* - The request must be valid (see {verify}) if the `requireValidRequest` is true.
*
* Emits an {ExecutedForwardRequest} event.
*
* IMPORTANT: Using this function doesn't check that all the `msg.value` was sent, potentially
* leaving value stuck in the contract.
*/
function _execute(ForwardRequestData calldata request, bool requireValidRequest)
internal
virtual
returns (bool success)
{
(bool isTrustedForwarder, bool active, bool signerMatch, address signer) = _validate(request);
// Need to explicitly specify if a revert is required since non-reverting is default for
// batches and reversion is opt-in since it could be useful in some scenarios
if (requireValidRequest) {
if (!isTrustedForwarder) {
revert ERC2771UntrustfulTarget(request.to, address(this));
}
if (!active) {
revert ERC2771ForwarderExpiredRequest(request.deadline);
}
if (!signerMatch) {
revert ERC2771ForwarderInvalidSigner(signer, request.from);
}
}
// Ignore an invalid request because requireValidRequest = false
if (isTrustedForwarder && signerMatch && active) {
// Nonce should be used before the call to prevent reusing by reentrancy
uint256 currentNonce = _useNonce(signer);
uint256 reqGas = request.gas;
address to = request.to;
uint256 value = request.value;
bytes memory data = abi.encodePacked(request.data, request.from);
uint256 gasLeft;
assembly {
success := call(reqGas, to, value, add(data, 0x20), mload(data), 0, 0)
gasLeft := gas()
}
_checkForwardedGas(gasLeft, request);
emit ExecutedForwardRequest(signer, currentNonce, success);
}
}
/**
* @dev Returns whether the target trusts this forwarder.
*
* This function performs a static call to the target contract calling the
* {ERC2771Context-isTrustedForwarder} function.
*/
function _isTrustedByTarget(address target) private view returns (bool) {
bytes memory encodedParams = abi.encodeCall(ERC2771Context.isTrustedForwarder, (address(this)));
bool success;
uint256 returnSize;
uint256 returnValue;
/// @solidity memory-safe-assembly
assembly {
// Perform the staticcall and save the result in the scratch space.
// | Location | Content | Content (Hex) |
// |-----------|----------|--------------------------------------------------------------------|
// | | | result ↓ |
// | 0x00:0x1F | selector | 0x0000000000000000000000000000000000000000000000000000000000000001 |
success := staticcall(gas(), target, add(encodedParams, 0x20), mload(encodedParams), 0, 0x20)
returnSize := returndatasize()
returnValue := mload(0)
}
return success && returnSize >= 0x20 && returnValue > 0;
}
/**
* @dev Checks if the requested gas was correctly forwarded to the callee.
*
* As a consequence of https://eips.ethereum.org/EIPS/eip-150[EIP-150]:
* - At most `gasleft() - floor(gasleft() / 64)` is forwarded to the callee.
* - At least `floor(gasleft() / 64)` is kept in the caller.
*
* It reverts consuming all the available gas if the forwarded gas is not the requested gas.
*
* IMPORTANT: The `gasLeft` parameter should be measured exactly at the end of the forwarded call.
* Any gas consumed in between will make room for bypassing this check.
*/
function _checkForwardedGas(uint256 gasLeft, ForwardRequestData calldata request) private pure {
// To avoid insufficient gas griefing attacks, as referenced in https://ronan.eth.limo/blog/ethereum-gas-dangers/
//
// A malicious relayer can attempt to shrink the gas forwarded so that the underlying call reverts out-of-gas
// but the forwarding itself still succeeds. In order to make sure that the subcall received sufficient gas,
// we will inspect gasleft() after the forwarding.
//
// Let X be the gas available before the subcall, such that the subcall gets at most X * 63 / 64.
// We can't know X after CALL dynamic costs, but we want it to be such that X * 63 / 64 >= req.gas.
// Let Y be the gas used in the subcall. gasleft() measured immediately after the subcall will be gasleft() = X - Y.
// If the subcall ran out of gas, then Y = X * 63 / 64 and gasleft() = X - Y = X / 64.
// Under this assumption req.gas / 63 > gasleft() is true if and only if
// req.gas / 63 > X / 64, or equivalently req.gas > X * 63 / 64.
// This means that if the subcall runs out of gas we are able to detect that insufficient gas was passed.
//
// We will now also see that req.gas / 63 > gasleft() implies that req.gas >= X * 63 / 64.
// The contract guarantees Y <= req.gas, thus gasleft() = X - Y >= X - req.gas.
// - req.gas / 63 > gasleft()
// - req.gas / 63 >= X - req.gas
// - req.gas >= X * 63 / 64
// In other words if req.gas < X * 63 / 64 then req.gas / 63 <= gasleft(), thus if the relayer behaves honestly
// the forwarding does not revert.
if (gasLeft < request.gas / 63) {
// We explicitly trigger invalid opcode to consume all gas and bubble-up the effects, since
// neither revert nor assert consumes all gas since Solidity 0.8.20
// https://docs.soliditylang.org/en/v0.8.20/control-structures.html#panic-via-assert-and-error-via-require
/// @solidity memory-safe-assembly
assembly {
invalid()
}
}
}
}
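To make the 63/64 argument in _checkForwardedGas concrete, here is a hedged, standalone restatement of the same check with round illustrative numbers (the figures are assumptions for the example, not measurements):

// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;

// Illustration-only restatement of the gas check in the contract above.
library ForwardedGasCheckSketch {
    // Example: suppose X = 640,000 gas was available before the subcall, so the
    // callee received at most X * 63 / 64 = 630,000. If the request demanded
    // requestGas = 700,000 and the callee ran out of gas, gasLeft ends up near
    // X / 64 = 10,000, while requestGas / 63 is about 11,111 > 10,000, so the
    // check below fires and the relayer's under-provisioning is detected.
    function check(uint256 gasLeft, uint256 requestGas) internal pure {
        if (gasLeft < requestGas / 63) {
            assembly {
                invalid() // consume all remaining gas, as in the contract above
            }
        }
    }
}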

View File

@@ -0,0 +1,51 @@
// SPDX-License-Identifier: MIT
// @note This file is directly copied from OpenZeppelin's master branch:
// https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/utils/Nonces.sol
// Modifications are made to make it compatible with solidity 0.8.16.
pragma solidity ^0.8.16;
/**
* @dev Provides tracking nonces for addresses. Nonces will only increment.
*/
abstract contract Nonces {
/**
* @dev The nonce used for an `account` is not the expected current nonce.
*/
error InvalidAccountNonce(address account, uint256 currentNonce);
mapping(address => uint256) private _nonces;
/**
* @dev Returns the next unused nonce for an address.
*/
function nonces(address owner) public view virtual returns (uint256) {
return _nonces[owner];
}
/**
* @dev Consumes a nonce.
*
* Returns the current value and increments nonce.
*/
function _useNonce(address owner) internal virtual returns (uint256) {
// For each account, the nonce has an initial value of 0, can only be incremented by one, and cannot be
// decremented or reset. This guarantees that the nonce never overflows.
unchecked {
// It is important to do x++ and not ++x here.
return _nonces[owner]++;
}
}
/**
* @dev Same as {_useNonce} but checking that `nonce` is the next valid for `owner`.
*/
function _useCheckedNonce(address owner, uint256 nonce) internal virtual returns (uint256) {
uint256 current = _useNonce(owner);
if (nonce != current) {
revert InvalidAccountNonce(owner, current);
}
return current;
}
}
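A hedged sketch of how a contract might consume these nonces; NoncesUsageSketch and its functions are hypothetical, and it assumes the Nonces contract above lives in ./Nonces.sol:

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

import {Nonces} from "./Nonces.sol"; // the abstract contract shown above

// Hypothetical consumer, for illustration only.
contract NoncesUsageSketch is Nonces {
    event Consumed(address indexed owner, uint256 nonce);

    // Each successful call burns exactly one nonce for `owner`, so a
    // captured request or signature cannot be replayed.
    function consume(address owner) external returns (uint256 used) {
        used = _useNonce(owner); // returns the pre-increment value
        emit Consumed(owner, used);
    }

    // Strict variant: reverts with InvalidAccountNonce unless `expected`
    // is the owner's current nonce.
    function consumeChecked(address owner, uint256 expected) external {
        _useCheckedNonce(owner, expected);
    }
}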

View File

@@ -23,6 +23,20 @@ contract L2TxFeeVaultTest is DSTestPlus {
vault.withdraw();
}
function testCantWithdrawAmountBelowMinimum(uint256 amount) public {
amount = bound(amount, 0 ether, 10 ether - 1);
hevm.deal(address(vault), 100 ether);
hevm.expectRevert("FeeVault: withdrawal amount must be greater than minimum withdrawal amount");
vault.withdraw(amount);
}
function testCantWithdrawMoreThanBalance(uint256 amount) public {
hevm.assume(amount >= 10 ether);
hevm.deal(address(vault), amount - 1);
hevm.expectRevert(new bytes(0));
vault.withdraw(amount);
}
function testWithdrawOnce() public {
hevm.deal(address(vault), 11 ether);
vault.withdraw();
@@ -30,6 +44,17 @@ contract L2TxFeeVaultTest is DSTestPlus {
assertEq(vault.totalProcessed(), 11 ether);
}
function testWithdrawAmountOnce(uint256 amount) public {
amount = bound(amount, 10 ether, 100 ether);
hevm.deal(address(vault), 100 ether);
vault.withdraw(amount);
assertEq(address(messenger).balance, amount);
assertEq(vault.totalProcessed(), amount);
assertEq(address(vault).balance, 100 ether - amount);
}
function testWithdrawTwice() public {
hevm.deal(address(vault), 11 ether);
vault.withdraw();
@@ -41,4 +66,21 @@ contract L2TxFeeVaultTest is DSTestPlus {
assertEq(address(messenger).balance, 33 ether);
assertEq(vault.totalProcessed(), 33 ether);
}
function testWithdrawAmountTwice(uint256 amount1, uint256 amount2) public {
amount1 = bound(amount1, 10 ether, 100 ether);
amount2 = bound(amount2, 10 ether, 100 ether);
hevm.deal(address(vault), 200 ether);
vault.withdraw(amount1);
assertEq(address(messenger).balance, amount1);
assertEq(vault.totalProcessed(), amount1);
vault.withdraw(amount2);
assertEq(address(messenger).balance, amount1 + amount2);
assertEq(vault.totalProcessed(), amount1 + amount2);
assertEq(address(vault).balance, 200 ether - amount1 - amount2);
}
}

View File

@@ -159,32 +159,39 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
}
timeout.Inc()
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err := c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as ProverProofInvalid
if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, assignedProverTask.UUID, types.ProverProofInvalid, tx); err != nil {
if err := c.proverTaskOrm.UpdateProverTaskProvingStatusAndFailureType(c.ctx, assignedProverTask.UUID, types.ProverProofInvalid, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task proving status failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update prover task failure type
if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, assignedProverTask.UUID, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
switch message.ProofType(assignedProverTask.TaskType) {
case message.ProofTypeChunk:
if err := c.chunkOrm.DecreaseActiveAttemptsByHash(c.ctx, assignedProverTask.TaskID, tx); err != nil {
log.Error("decrease chunk active attempts failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
if err := c.chunkOrm.UpdateProvingStatusFailed(c.ctx, assignedProverTask.TaskID, c.cfg.ProverManager.SessionAttempts, tx); err != nil {
log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
case message.ProofTypeBatch:
if err := c.batchOrm.DecreaseActiveAttemptsByHash(c.ctx, assignedProverTask.TaskID, tx); err != nil {
log.Error("decrease batch active attempts failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
if err := c.batchOrm.UpdateProvingStatusFailed(c.ctx, assignedProverTask.TaskID, c.cfg.ProverManager.SessionAttempts, tx); err != nil {
log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
}
// update the task to unassigned, let collector restart it
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err := c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err := c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
return nil
})
if err != nil {

View File

@@ -15,7 +15,6 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
@@ -25,7 +24,6 @@ import (
// BatchProverTask is the prover task implementation for batch proofs
type BatchProverTask struct {
BaseProverTask
vk string
batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal prometheus.Counter
@@ -35,13 +33,13 @@ type BatchProverTask struct {
func NewBatchProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
},
vk: vk,
batchAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_attempts_exceed_total",
Help: "Total number of batch attempts exceed.",
@@ -56,71 +54,30 @@ func NewBatchProverTask(cfg *config.Config, db *gorm.DB, vk string, reg promethe
// Assign loads and assigns batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from context failed")
taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if getTaskParameter.VK == "" { // allow vk being empty, because for the first time the prover may not know its vk
if !version.CheckScrollProverVersionTag(proverVersion.(string)) { // but reject too-old provers
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
} else if getTaskParameter.VK != bp.vk { // non-empty vk but different
if version.CheckScrollProverVersion(proverVersion.(string)) { // same prover version but different vks
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
// different prover versions and different vks
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
isAssigned, err := bp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover is assigned a task: %w", err)
}
if isAssigned {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task", publicKey)
}
batchTasks, err := bp.batchOrm.UpdateUnassignedBatchReturning(ctx, 1)
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
batchTask, err := bp.batchOrm.UpdateBatchAttemptsReturning(ctx, maxActiveAttempts, maxTotalAttempts)
if err != nil {
return nil, fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
}
if len(batchTasks) == 0 {
if batchTask == nil {
return nil, nil
}
if len(batchTasks) != 1 {
log.Error("get unassigned batch proving task len not 1", "length", len(batchTasks), "batch tasks", batchTasks)
return nil, ErrCoordinatorInternalFailure
}
batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", publicKey, "prover name", proverName)
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
bp.batchAttemptsExceedTotal.Inc()
// TODO: retry fetching unassigned batch proving task
log.Error("batch task proving attempts reach the maximum", "hash", batchTask.Hash)
return nil, nil
}
log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: publicKey.(string),
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: proverName.(string),
ProverVersion: proverVersion.(string),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// why UTC time is needed here: see scroll/common/database/db.go
@@ -129,14 +86,14 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
bp.recoverProvingStatus(ctx, batchTask)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", publicKey, "err", err)
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx, &proverTask)
if err != nil {
bp.recoverProvingStatus(ctx, batchTask)
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
@@ -193,10 +150,8 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return taskMsg, nil
}
// recoverProvingStatus: if the batch task was not successfully returned to the prover,
// recover its proving status to unassigned
func (bp *BatchProverTask) recoverProvingStatus(ctx *gin.Context, batchTask *orm.Batch) {
if err := bp.batchOrm.UpdateProvingStatus(ctx, batchTask.Hash, types.ProvingTaskUnassigned); err != nil {
log.Warn("failed to recover batch proving status", "hash:", batchTask.Hash, "error", err)
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
if err := bp.batchOrm.DecreaseActiveAttemptsByHash(ctx, batchTask.Hash); err != nil {
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
}
}
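Taken together, the reworked batch Assign path reduces to a small pipeline: grab the earliest eligible batch while bumping its attempts, record the prover-task row, and roll the active-attempts slot back if the handout fails. A hypothetical end-to-end sketch; `assignStore`, `batchTask`, and `assignBatch` are illustrative stand-ins, not the coordinator's exports:

```go
// Hypothetical sketch of the reworked Assign flow above for batch tasks.
package main

import (
	"errors"
	"fmt"
)

type batchTask struct{ Hash string }

type assignStore interface {
	// Atomically bump attempts on the earliest eligible batch;
	// (nil, nil) means there is currently nothing to assign.
	updateBatchAttemptsReturning(maxActive, maxTotal uint8) (*batchTask, error)
	insertProverTask(taskID, publicKey string) error
	// Undo the active_attempts bump when handing out the task fails.
	decreaseActiveAttempts(taskID string) error
}

func assignBatch(s assignStore, publicKey string, maxActive, maxTotal uint8) (*batchTask, error) {
	task, err := s.updateBatchAttemptsReturning(maxActive, maxTotal)
	if err != nil {
		return nil, fmt.Errorf("select and update batch: %w", err)
	}
	if task == nil {
		return nil, nil // no eligible batch right now; not an error
	}
	if err := s.insertProverTask(task.Hash, publicKey); err != nil {
		// Roll back the slot so another prover can take the task.
		_ = s.decreaseActiveAttempts(task.Hash)
		return nil, errors.New("coordinator internal error")
	}
	return task, nil
}

func main() { fmt.Println("sketch only") }
```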

View File

@@ -15,7 +15,6 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
@@ -28,7 +27,6 @@ var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ChunkProverTask is the chunk prover task
type ChunkProverTask struct {
BaseProverTask
vk string
chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal prometheus.Counter
@@ -38,13 +36,13 @@ type ChunkProverTask struct {
func NewChunkProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},
vk: vk,
chunkAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_attempts_exceed_total",
Help: "Total number of chunk attempts exceed.",
@@ -59,74 +57,31 @@ func NewChunkProverTask(cfg *config.Config, db *gorm.DB, vk string, reg promethe
// Assign assigns the chunk proof task which needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from context failed")
taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if getTaskParameter.VK == "" { // allow vk being empty, because for the first time the prover may not know its vk
if !version.CheckScrollProverVersionTag(proverVersion.(string)) { // but reject too-old provers
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
} else if getTaskParameter.VK != cp.vk { // non-empty vk but different
if version.CheckScrollProverVersion(proverVersion.(string)) { // same prover version but different vks
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
// different prover versions and different vks
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
isAssigned, err := cp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover is assigned a task: %w", err)
}
if isAssigned {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task", publicKey)
}
// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
chunkTask, err := cp.chunkOrm.UpdateChunkAttemptsReturning(ctx, getTaskParameter.ProverHeight, maxActiveAttempts, maxTotalAttempts)
if err != nil {
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if len(chunkTasks) == 0 {
if chunkTask == nil {
return nil, nil
}
if len(chunkTasks) != 1 {
log.Error("get unassigned chunk proving task len not 1", "length", len(chunkTasks), "chunk tasks", chunkTasks)
return nil, ErrCoordinatorInternalFailure
}
chunkTask := chunkTasks[0]
log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", publicKey, "prover name", proverName)
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
cp.chunkAttemptsExceedTotal.Inc()
// TODO: retry fetching unassigned chunk proving task
log.Error("chunk task proving attempts reach the maximum", "hash", chunkTask.Hash)
return nil, nil
}
log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: publicKey.(string),
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: proverName.(string),
ProverVersion: proverVersion.(string),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// why UTC time is needed here: see scroll/common/database/db.go
@@ -134,14 +89,14 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if err = cp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", publicKey, "err", err)
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx, &proverTask)
if err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
@@ -181,10 +136,8 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return proverTaskSchema, nil
}
// recoverProvingStatus: if the chunk task was not successfully returned to the prover,
// recover its proving status to unassigned
func (cp *ChunkProverTask) recoverProvingStatus(ctx *gin.Context, chunkTask *orm.Chunk) {
if err := cp.chunkOrm.UpdateProvingStatus(ctx, chunkTask.Hash, types.ProvingTaskUnassigned); err != nil {
log.Warn("failed to recover chunk proving status", "hash:", chunkTask.Hash, "error", err)
func (cp *ChunkProverTask) recoverActiveAttempts(ctx *gin.Context, chunkTask *orm.Chunk) {
if err := cp.chunkOrm.DecreaseActiveAttemptsByHash(ctx, chunkTask.Hash); err != nil {
log.Error("failed to recover chunk active attempts", "hash", chunkTask.Hash, "error", err)
}
}

View File

@@ -1,14 +1,12 @@
package provertask
import (
"context"
"fmt"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
@@ -23,8 +21,8 @@ type ProverTask interface {
// BaseProverTask is a base prover task which contains a series of shared functions
type BaseProverTask struct {
cfg *config.Config
ctx context.Context
db *gorm.DB
vk string
batchOrm *orm.Batch
chunkOrm *orm.Chunk
@@ -32,36 +30,53 @@ type BaseProverTask struct {
proverTaskOrm *orm.ProverTask
}
// checkAttemptsExceeded uses the count of prover task records to check whether the attempts limit has been exceeded
func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.ProofType) bool {
whereFields := make(map[string]interface{})
whereFields["task_id"] = hash
whereFields["task_type"] = int16(taskType)
proverTasks, err := b.proverTaskOrm.GetProverTasks(b.ctx, whereFields, nil, 0, 0)
if err != nil {
log.Error("get prover task error", "hash id", hash, "error", err)
return true
}
if len(proverTasks) >= int(b.cfg.ProverManager.SessionAttempts) {
log.Warn("proof generation prover task reach the max attempts", "hash", hash)
transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {
case message.ProofTypeChunk:
if err := b.chunkOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
}
case message.ProofTypeBatch:
if err := b.batchOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
}
}
return nil
})
if transErr == nil {
return false
}
}
return true
type proverTaskContext struct {
PublicKey string
ProverName string
ProverVersion string
}
// checkParameter checks whether the prover task parameters are legal
func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*proverTaskContext, error) {
var ptc proverTaskContext
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from context failed")
}
ptc.PublicKey = publicKey.(string)
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from context failed")
}
ptc.ProverName = proverName.(string)
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
ptc.ProverVersion = proverVersion.(string)
if getTaskParameter.VK == "" { // allow vk being empty, because for the first time the prover may not know its vk
if !version.CheckScrollProverVersionTag(proverVersion.(string)) { // but reject too-old provers
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
} else if getTaskParameter.VK != b.vk { // non-empty vk but different
if version.CheckScrollProverVersion(proverVersion.(string)) { // same prover version but different vks
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
// different prover versions and different vks
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover is assigned a task: %w", err)
}
if isAssigned {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task", publicKey)
}
return &ptc, nil
}
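The vk/version branches in `checkParameter` form a small decision table: empty vk, matching vk, and mismatched vk with same or different release. A minimal sketch of that table, with the version helpers injected as functions since the real `version` package is not shown here; `compatible` and its parameters are hypothetical names:

```go
// Sketch of the vk/version compatibility decision encoded above.
package main

import (
	"errors"
	"fmt"
)

func compatible(proverVK, coordinatorVK, proverVersion string,
	versionTagOK func(string) bool, sameVersion func(string) bool) error {
	switch {
	case proverVK == "":
		// First connection: the prover may not know its vk yet,
		// but provers that are too old are still rejected.
		if !versionTagOK(proverVersion) {
			return fmt.Errorf("incompatible prover version: %s", proverVersion)
		}
		return nil
	case proverVK != coordinatorVK && sameVersion(proverVersion):
		// Same release, different vk: likely wrong params or config files.
		return errors.New("incompatible vk: check your params or config files")
	case proverVK != coordinatorVK:
		// Different release and different vk: ask the prover to upgrade.
		return fmt.Errorf("incompatible prover version: %s", proverVersion)
	default:
		return nil
	}
}

func main() {
	err := compatible("vk-a", "vk-b", "v4.0.0",
		func(string) bool { return true }, // version tag acceptable
		func(string) bool { return true }) // same release as coordinator
	fmt.Println(err) // incompatible vk: check your params or config files
}
```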

View File

@@ -174,7 +174,8 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.proofRecover(ctx, proverTask, proofMsg)
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
@@ -192,7 +193,9 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
m.proofRecover(ctx, proverTask, proofMsg)
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
return ErrCoordinatorInternalFailure
}
@@ -226,7 +229,8 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
}()
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid ||
types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid {
m.validateFailureProverTaskSubmitTwice.Inc()
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
@@ -247,10 +251,8 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if proofMsg.Status != message.StatusOk {
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
// Verify if the proving task has already been assigned to another prover.
// Upon receiving an error message, it's possible the proving status has been reset by another prover
// and the task has been reassigned. In this case, the coordinator should avoid resetting the proving status.
m.processProverErr(ctx, proverTask)
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofMsg)
m.validateFailureProverTaskStatusNotOk.Inc()
@@ -285,20 +287,20 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return nil
}
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) {
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskUnassigned.String())
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofInvalid, failureType, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey, "error", err)
}
}
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
return err
}
@@ -306,52 +308,46 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm
}
// updateProofStatus updates the chunk/batch task and session info status
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, status types.ProvingStatus, proofTimeSec uint64) error {
var proverTaskStatus types.ProverProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:
proverTaskStatus = types.ProverProofInvalid
case types.ProvingTaskVerified:
proverTaskStatus = types.ProverProofValid
}
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask,
proofMsg *message.ProofMsg, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error {
err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proverTask.UUID, proverTaskStatus, tx); updateErr != nil {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatusAndFailureType(ctx, proverTask.UUID, status, failureType, tx); updateErr != nil {
log.Error("failed to update prover task proving status and failure type", "uuid", proverTask.UUID, "error", updateErr)
return updateErr
}
// if the chunk/batch proof has already been verified, a failed status must not overwrite its proving status
if m.checkIsTaskSuccess(ctx, proverTask.TaskID, proofMsg.Type) {
log.Info("update proof status skip because this chunk / batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey)
return nil
}
if status == types.ProvingTaskVerified {
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, proofTimeSec, tx)
case message.ProofTypeBatch:
storeProofErr = m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx)
}
if storeProofErr != nil {
log.Error("failed to store chunk/batch proof into db", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr)
return storeProofErr
}
}
switch proofMsg.Type {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, proverTask.TaskID, status, tx); err != nil {
if err := m.chunkOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatus(ctx, proverTask.TaskID, status, tx); err != nil {
if err := m.batchOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
}
// if the chunk/batch proof has already been verified, a failed status must not overwrite its proving status
if m.checkIsTaskSuccess(ctx, proverTask.TaskID, proofMsg.Type) {
log.Info("update proof status skip because this chunk/batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey)
return nil
}
if status == types.ProverProofValid {
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, types.ProvingTaskVerified, proofTimeSec, tx)
case message.ProofTypeBatch:
storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.BatchProof, types.ProvingTaskVerified, proofTimeSec, tx)
}
if storeProofErr != nil {
log.Error("failed to store chunk/batch proof and proving status", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr)
return storeProofErr
}
}
return nil
})
@@ -359,7 +355,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
return err
}
if status == types.ProvingTaskVerified && proofMsg.Type == message.ProofTypeChunk {
if status == types.ProverProofValid && proofMsg.Type == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
@@ -389,34 +385,6 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
return provingStatus == types.ProvingTaskVerified
}
func (m *ProofReceiverLogic) processProverErr(ctx context.Context, proverTask *orm.ProverTask) {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proverTask.UUID, types.ProverProofInvalid); updateErr != nil {
log.Error("update prover task proving status failure", "uuid", proverTask.UUID, "taskID", proverTask.TaskID, "proverPublicKey",
proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", updateErr)
}
proverTasks, err := m.proverTaskOrm.GetAssignedTaskOfOtherProvers(ctx, message.ProofType(proverTask.TaskType), proverTask.TaskID, proverTask.ProverPublicKey)
if err != nil {
log.Warn("checkIsAssignedToOtherProver failure", "taskID", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
return
}
if len(proverTasks) > 0 {
return
}
switch message.ProofType(proverTask.TaskType) {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatusFromProverError(ctx, proverTask.TaskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update chunk proving_status as failed", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatusFromProverError(ctx, proverTask.TaskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update batch proving_status as failed", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
}
}
}
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte
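The reordered `updateProofStatus` above has a precise sequencing: record the prover-task outcome, always release the active-attempts slot, then leave an already-verified task untouched, and only for a valid proof store the proof and verified status in one update. A condensed, hypothetical sketch; `proofStore` and its methods stand in for the real ORMs:

```go
// Sketch of the transaction ordering in updateProofStatus above.
package main

import "fmt"

type proofStore interface {
	updateProverTask(uuid string, valid bool, failureType int) error
	decreaseActiveAttempts(taskID string) error
	taskAlreadyVerified(taskID string) (bool, error)
	storeProofAndMarkVerified(taskID string, proof []byte, proofTimeSec uint64) error
}

func updateProofStatus(s proofStore, uuid, taskID string, valid bool, failureType int,
	proof []byte, proofTimeSec uint64) error {
	if err := s.updateProverTask(uuid, valid, failureType); err != nil {
		return err
	}
	// Always release this prover's slot, success or failure.
	if err := s.decreaseActiveAttempts(taskID); err != nil {
		return err
	}
	// If another prover already got the task verified, leave its status alone.
	done, err := s.taskAlreadyVerified(taskID)
	if err != nil || done {
		return err
	}
	if valid {
		// Proof storage and ProvingTaskVerified are now a single update.
		return s.storeProofAndMarkVerified(taskID, proof, proofTimeSec)
	}
	return nil
}

func main() { fmt.Println("sketch only") }
```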

View File

@@ -14,6 +14,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
const defaultBatchHeaderVersion = 0
@@ -41,6 +42,8 @@ type Batch struct {
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"`
ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"`
// rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
@@ -151,6 +154,18 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
return &latestBatch, nil
}
// GetAttemptsByHash gets the batch attempts by hash. Used by unit tests
func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int16, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", hash)
var batch Batch
if err := db.Find(&batch).Error; err != nil {
return 0, 0, fmt.Errorf("Batch.GetAttemptsByHash error: %w, batch hash: %v", err, hash)
}
return batch.ActiveAttempts, batch.TotalAttempts, nil
}
// InsertBatch inserts a new batch into the database.
// for unit test
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
@@ -211,6 +226,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
}
@@ -242,60 +259,25 @@ func (o *Batch) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHas
return nil
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
// UpdateProvingStatusFailed updates the proving status of a batch to failed.
func (o *Batch) UpdateProvingStatusFailed(ctx context.Context, hash string, maxAttempts uint8, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
db = db.Where("total_attempts >= ?", maxAttempts)
db = db.Where("proving_status != ?", int(types.ProverProofValid))
if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, types.ProvingTaskFailed.String())
}
return nil
}
// UpdateProvingStatusFromProverError updates the batch proving status when the prover fails to prove
func (o *Batch) UpdateProvingStatusFromProverError(ctx context.Context, hash string, status types.ProvingStatus) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash).Where("proving_status", types.ProvingTaskAssigned)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatusOptimistic error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}
// UpdateProofByHash updates the batch proof by hash.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.BatchProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
// UpdateProofAndProvingStatusByHash updates the batch proof and proving status by hash.
func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.BatchProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
@@ -307,7 +289,9 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes
updateFields["proving_status"] = provingStatus
updateFields["proof_time_sec"] = proofTimeSec
updateFields["proved_at"] = utils.NowUTC()
db = db.WithContext(ctx)
db = db.Model(&Batch{})
@@ -319,28 +303,50 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
return nil
}
// UpdateUnassignedBatchReturning updates the unassigned batch and returns the updated record
func (o *Batch) UpdateUnassignedBatchReturning(ctx context.Context, limit int) ([]*Batch, error) {
if limit < 0 {
return nil, errors.New("limit must not be smaller than zero")
}
if limit == 0 {
return nil, nil
}
// UpdateBatchAttemptsReturning atomically increments the attempts count for the earliest available batch that meets the conditions.
func (o *Batch) UpdateBatchAttemptsReturning(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
db := o.db.WithContext(ctx)
subQueryDB := db.Model(&Batch{}).Select("index")
subQueryDB = subQueryDB.Where("proving_status = ? AND chunk_proofs_status = ?", types.ProvingTaskUnassigned, types.ChunkProofsStatusReady)
subQueryDB = subQueryDB.Clauses(clause.Locking{Strength: "UPDATE"})
subQueryDB = subQueryDB.Where("proving_status not in (?)", []int{int(types.ProvingTaskVerified), int(types.ProvingTaskFailed)})
subQueryDB = subQueryDB.Where("total_attempts < ?", maxTotalAttempts)
subQueryDB = subQueryDB.Where("active_attempts < ?", maxActiveAttempts)
subQueryDB = subQueryDB.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
subQueryDB = subQueryDB.Order("index ASC")
subQueryDB = subQueryDB.Limit(limit)
subQueryDB = subQueryDB.Limit(1)
var batches []*Batch
db = db.Model(&batches).Clauses(clause.Returning{})
var updatedBatch Batch
db = db.Model(&updatedBatch).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Batch.UpdateUnassignedBatchReturning error: %w", err)
result := db.Updates(map[string]interface{}{
"proving_status": types.ProvingTaskAssigned,
"total_attempts": gorm.Expr("total_attempts + 1"),
"active_attempts": gorm.Expr("active_attempts + 1"),
})
if result.Error != nil {
return nil, fmt.Errorf("failed to select and update batch, max active attempts: %v, max total attempts: %v, err: %w",
maxActiveAttempts, maxTotalAttempts, result.Error)
}
return batches, nil
if result.RowsAffected == 0 {
return nil, nil
}
return &updatedBatch, nil
}
// DecreaseActiveAttemptsByHash decrements the active_attempts of a batch given its hash.
func (o *Batch) DecreaseActiveAttemptsByHash(ctx context.Context, batchHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", batchHash)
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
if err := db.UpdateColumn("active_attempts", gorm.Expr("active_attempts - 1")).Error; err != nil {
return fmt.Errorf("Batch.DecreaseActiveAttemptsByHash error: %w, batch hash: %v", err, batchHash)
}
return nil
}
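For readers less used to GORM's builder, the chain in `UpdateBatchAttemptsReturning` corresponds roughly to the following single statement. This is an approximation for illustration only; the exact SQL GORM emits (placeholder numbering, quoting, the RETURNING list) may differ:

```go
// Approximate SQL behind Batch.UpdateBatchAttemptsReturning, for reference.
package main

import "fmt"

const approxAssignBatchSQL = `
UPDATE batch SET
    proving_status  = ?,                 -- types.ProvingTaskAssigned
    total_attempts  = total_attempts + 1,
    active_attempts = active_attempts + 1
WHERE index = (
    SELECT index FROM batch
    WHERE proving_status NOT IN (?, ?)   -- verified, failed
      AND total_attempts  < ?            -- cfg.ProverManager.SessionAttempts
      AND active_attempts < ?            -- cfg.ProverManager.ProversPerSession
      AND chunk_proofs_status = ?        -- types.ChunkProofsStatusReady
    ORDER BY index ASC
    LIMIT 1
    FOR UPDATE                           -- serializes concurrent Assign calls
)
RETURNING *;
`

func main() { fmt.Println(approxAssignBatchSQL) }
```

The row lock taken by the subquery is what lets many provers call Assign concurrently without two of them incrementing the attempts of the same batch.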

View File

@@ -13,6 +13,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
// Chunk represents a chunk of blocks in the database.
@@ -40,6 +41,8 @@ type Chunk struct {
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"`
ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"`
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
@@ -195,6 +198,18 @@ func (o *Chunk) GetChunkBatchHash(ctx context.Context, chunkHash string) (string
return chunk.BatchHash, nil
}
// GetAttemptsByHash gets the chunk attempts by hash. Used by unit tests
func (o *Chunk) GetAttemptsByHash(ctx context.Context, hash string) (int16, int16, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash = ?", hash)
var chunk Chunk
if err := db.Find(&chunk).Error; err != nil {
return 0, 0, fmt.Errorf("Batch.GetAttemptsByHash error: %w, batch hash: %v", err, hash)
}
return chunk.ActiveAttempts, chunk.TotalAttempts, nil
}
// InsertChunk inserts a new chunk into the database.
// for unit test
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
@@ -259,6 +274,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
}
db := o.db
@@ -275,19 +292,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
return &newChunk, nil
}
// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
// UpdateProvingStatusFailed updates the proving status of a chunk to failed.
func (o *Chunk) UpdateProvingStatusFailed(ctx context.Context, hash string, maxAttempts uint8, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
@@ -295,31 +301,16 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatus error: %w, chunk hash: %v, status: %v", err, hash, status.String())
db = db.Where("total_attempts >= ?", maxAttempts)
db = db.Where("proving_status != ?", int(types.ProverProofValid))
if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, types.ProvingTaskFailed.String())
}
return nil
}
// UpdateProvingStatusFromProverError updates the chunk proving status when the prover fails to prove
func (o *Chunk) UpdateProvingStatusFromProverError(ctx context.Context, hash string, status types.ProvingStatus) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
updateFields["prover_assigned_at"] = nil
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash).Where("proving_status", types.ProvingTaskAssigned)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatusOptimistic error: %w, chunk hash: %v, status: %v", err, hash, status.String())
}
return nil
}
// UpdateProofByHash updates the chunk proof by hash.
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.ChunkProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
// UpdateProofAndProvingStatusByHash updates the chunk proof and proving_status by hash.
func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.ChunkProof, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
@@ -331,7 +322,9 @@ func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *messa
updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes
updateFields["proving_status"] = int(status)
updateFields["proof_time_sec"] = proofTimeSec
updateFields["proved_at"] = utils.NowUTC()
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
@@ -357,32 +350,54 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
return nil
}
// UpdateUnassignedChunkReturning updates the unassigned chunk whose end_block_number <= height and returns the updated record
func (o *Chunk) UpdateUnassignedChunkReturning(ctx context.Context, height, limit int) ([]*Chunk, error) {
// UpdateChunkAttemptsReturning atomically increments the attempts count for the earliest available chunk that meets the conditions.
func (o *Chunk) UpdateChunkAttemptsReturning(ctx context.Context, height int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
if height <= 0 {
return nil, errors.New("Chunk.UpdateUnassignedBatchReturning error: height must be larger than zero")
}
if limit < 0 {
return nil, errors.New("Chunk.UpdateUnassignedBatchReturning error: limit must not be smaller than zero")
}
if limit == 0 {
return nil, nil
return nil, errors.New("Chunk.UpdateChunkAttemptsReturning error: height must be larger than zero")
}
db := o.db.WithContext(ctx)
subQueryDB := db.Model(&Chunk{}).Select("index")
subQueryDB = subQueryDB.Where("proving_status = ?", types.ProvingTaskUnassigned)
subQueryDB = subQueryDB.Clauses(clause.Locking{Strength: "UPDATE"})
subQueryDB = subQueryDB.Where("proving_status not in (?)", []int{int(types.ProvingTaskVerified), int(types.ProvingTaskFailed)})
subQueryDB = subQueryDB.Where("total_attempts < ?", maxTotalAttempts)
subQueryDB = subQueryDB.Where("active_attempts < ?", maxActiveAttempts)
subQueryDB = subQueryDB.Where("end_block_number <= ?", height)
subQueryDB = subQueryDB.Order("index ASC")
subQueryDB = subQueryDB.Limit(limit)
subQueryDB = subQueryDB.Limit(1)
var chunks []*Chunk
db = db.Model(&chunks).Clauses(clause.Returning{})
var updatedChunk Chunk
db = db.Model(&updatedChunk).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Chunk.UpdateUnassignedBatchReturning error: %w", err)
result := db.Updates(map[string]interface{}{
"proving_status": types.ProvingTaskAssigned,
"total_attempts": gorm.Expr("total_attempts + 1"),
"active_attempts": gorm.Expr("active_attempts + 1"),
})
if result.Error != nil {
return nil, fmt.Errorf("failed to select and update batch, max active attempts: %v, max total attempts: %v, err: %w",
maxActiveAttempts, maxTotalAttempts, result.Error)
}
return chunks, nil
if result.RowsAffected == 0 {
return nil, nil
}
return &updatedChunk, nil
}
// DecreaseActiveAttemptsByHash decrements the active_attempts of a chunk given its hash.
func (o *Chunk) DecreaseActiveAttemptsByHash(ctx context.Context, chunkHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash = ?", chunkHash)
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
if err := db.UpdateColumn("active_attempts", gorm.Expr("active_attempts - 1")).Error; err != nil {
return fmt.Errorf("Chunk.DecreaseActiveAttemptsByHash error: %w, chunk hash: %v", err, chunkHash)
}
return nil
}

View File

@@ -244,8 +244,8 @@ func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, uuid uuid.UUID,
return nil
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, uuid uuid.UUID, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
// UpdateProverTaskProvingStatusAndFailureType updates the proving_status and, for invalid proofs, the failure_type of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatusAndFailureType(ctx context.Context, uuid uuid.UUID, status types.ProverProveStatus, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
@@ -254,7 +254,12 @@ func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, uuid uui
db = db.Model(&ProverTask{})
db = db.Where("uuid = ?", uuid)
if err := db.Update("proving_status", status).Error; err != nil {
updates := make(map[string]interface{})
updates["proving_status"] = int(status)
if status == types.ProverProofInvalid {
updates["failure_type"] = int(failureType)
}
if err := db.Updates(updates).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, uuid:%s, status: %v", err, uuid, status.String())
}
return nil
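The conditional write above means `failure_type` is only persisted alongside an invalid status, so successful proofs keep their undefined failure type. A tiny sketch of that shape; the constant is a placeholder, not the real enum value:

```go
// Sketch: build the Updates map the way the ORM method above does.
package main

import "fmt"

const proverProofInvalid = 3 // placeholder, not the real enum value

func buildUpdates(status, failureType int) map[string]interface{} {
	updates := map[string]interface{}{"proving_status": status}
	// failure_type is written only when the task is being marked invalid.
	if status == proverProofInvalid {
		updates["failure_type"] = failureType
	}
	return updates
}

func main() {
	fmt.Println(buildUpdates(proverProofInvalid, 1)) // both columns
	fmt.Println(buildUpdates(2, 1))                  // proving_status only
}
```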

View File

@@ -246,8 +246,14 @@ func testValidProof(t *testing.T) {
tickStop = time.Tick(time.Minute)
)
var chunkProofStatus types.ProvingStatus
var batchProofStatus types.ProvingStatus
var (
chunkProofStatus types.ProvingStatus
batchProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16
batchMaxAttempts int16
)
for {
select {
@@ -259,6 +265,17 @@ func testValidProof(t *testing.T) {
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
return
@@ -307,8 +324,14 @@ func testInvalidProof(t *testing.T) {
tickStop = time.Tick(time.Minute)
)
var chunkProofStatus types.ProvingStatus
var batchProofStatus types.ProvingStatus
var (
chunkProofStatus types.ProvingStatus
batchProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16
batchMaxAttempts int16
)
for {
select {
@@ -317,9 +340,18 @@ func testInvalidProof(t *testing.T) {
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskUnassigned && batchProofStatus == types.ProvingTaskUnassigned {
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
return
@@ -373,6 +405,10 @@ func testProofGeneratedFailed(t *testing.T) {
batchProofStatus types.ProvingStatus
chunkProverTaskProvingStatus types.ProverProveStatus
batchProverTaskProvingStatus types.ProverProveStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16
batchMaxAttempts int16
)
for {
@@ -386,6 +422,16 @@ func testProofGeneratedFailed(t *testing.T) {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts))
chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeChunk, dbChunk.Hash)
assert.NoError(t, err)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, batch.Hash)
@@ -409,6 +455,13 @@ func testTimeoutProof(t *testing.T) {
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
var (
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16
batchMaxAttempts int16
)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
@@ -438,6 +491,16 @@ func testTimeoutProof(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, batchProofStatus, types.ProvingTaskAssigned)
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 1, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 1, int(batchActiveAttempts))
// wait for the coordinator to reset the prover task proving status
time.Sleep(time.Duration(conf.ProverManager.BatchCollectionTimeSec*2) * time.Second)
@@ -460,4 +523,14 @@ func testTimeoutProof(t *testing.T) {
batchProofStatus2, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, batchProofStatus2, types.ProvingTaskVerified)
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 2, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 2, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts))
}

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of migrations.
assert.Equal(t, 11, int(cur))
assert.Equal(t, 12, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -0,0 +1,28 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE chunk
ADD COLUMN total_attempts SMALLINT NOT NULL DEFAULT 0,
ADD COLUMN active_attempts SMALLINT NOT NULL DEFAULT 0;
ALTER TABLE batch
ADD COLUMN total_attempts SMALLINT NOT NULL DEFAULT 0,
ADD COLUMN active_attempts SMALLINT NOT NULL DEFAULT 0;
create index if not exists idx_total_attempts_active_attempts_end_block_number
on chunk (total_attempts, active_attempts, end_block_number)
where deleted_at IS NULL;
create index if not exists idx_total_attempts_active_attempts_chunk_proofs_status
on batch (total_attempts, active_attempts, chunk_proofs_status)
where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists idx_total_attempts_active_attempts_end_block_number;
drop index if exists idx_total_attempts_active_attempts_chunk_proofs_status;
ALTER TABLE chunk
DROP COLUMN total_attempts,
DROP COLUMN active_attempts;
ALTER TABLE batch
DROP COLUMN total_attempts,
DROP COLUMN active_attempts;
-- +goose StatementEnd

View File

@@ -1,7 +1,7 @@
go 1.19
use (
./bridge
./rollup
./bridge-history-api
./common
./coordinator

View File

@@ -4,6 +4,8 @@ IMAGE_NAME=prover-stats-api
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
all: build
build:
GOBIN=$(PWD)/build/bin go build -o $(PWD)/build/bin/prover-stats-api ./cmd
@@ -21,3 +23,5 @@ lint: ## Lint the files - used for CI
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/prover-stats-api.Dockerfile
.PHONY: all

View File

@@ -1,28 +1,23 @@
# prover-stats-api
## how to get the prover-stats-api docs
This directory contains the `prover-stats-api` service that provides REST APIs to query the status of proving tasks assigned to the prover.
### 1. start the prover-stats-api server
## Instructions
```
cd ./prover-stats-api
make build
./prover-stats --config=./conf/config.json
```
1. Build and start the `prover-stats-api` service.
You will see the server run log:
```
Listening and serving HTTP on :8990
```
```
cd ./prover-stats-api
make build
./build/bin/prover-stats --config=./conf/config.json
```
### 2. browse the documents
2. Open this URL in your browser to view the API documentation.
```
http://localhost:8990/swagger/index.html
```
open this documents in your browser
```
http://localhost:8990/swagger/index.html
```
## how to update the prover-stats-api docs
## How to update the prover-stats-api docs
```
cd ./prover-stats-api

View File

@@ -1,13 +1,13 @@
.PHONY: mock_abi bridge_bins event_watcher gas_oracle rollup_relayer test lint clean docker
.PHONY: mock_abi rollup_bins event_watcher gas_oracle rollup_relayer test lint clean docker
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
mock_abi:
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL2.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./rollup/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./rollup/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./rollup/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./rollup/mock_bridge/MockBridgeL2.go
bridge_bins: ## Builds the Bridge bins.
rollup_bins: ## Builds the Rollup bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/

37
rollup/README.md Normal file
View File

@@ -0,0 +1,37 @@
# Rollup
This directory contains the three essential rollup services for the Scroll chain:
- Event Watcher (<a href="./cmd/event_watcher/">event_watcher</a>): watches the events emitted from the L1 and L2 contracts and updates the event database.
- Gas Oracle (<a href="./cmd/gas_oracle/">gas_oracle</a>): monitors the L1 and L2 gas price and sends transactions to update the gas price oracle contracts on L1 and L2.
- Rollup Relayer (<a href="./cmd/rollup_relayer/">rollup_relayer</a>): consists of three components: chunk and batch proposer and a relayer.
- The chunk and batch proposer proposes new chunks and batches that sends Commit Transactions for data availability and Finalize Transactions for proof verification and state finalization.
## Dependency
1. `abigen`
``` bash
go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
```
2. `solc`
See https://docs.soliditylang.org/en/latest/installing-solidity.html
## Build
```bash
make clean
make mock_abi
make rollup_bins
```
## Start
(Note: make sure you use different private keys for different senders in config.json.)
```bash
./build/bin/event_watcher --config ./config.json
./build/bin/gas_oracle --config ./config.json
./build/bin/rollup_relayer --config ./config.json
```

View File

@@ -17,8 +17,8 @@ import (
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/watcher"
)
var app *cli.App

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/rollup/cmd/event_watcher/app"
func main() {
app.Run()
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
@@ -17,10 +18,10 @@ import (
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
butils "scroll-tech/rollup/internal/utils"
)
var app *cli.App
@@ -92,14 +93,16 @@ func action(ctx *cli.Context) error {
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
// Fetch the latest block number to decrease the delay when fetching gas prices
// Use latest block number - 1 to prevent frequent reorg
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, rpc.LatestBlockNumber)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
log.Error("Failed to fetch L1 block header", "lastest", number, "err", loopErr)
if loopErr = l1watcher.FetchBlockHeader(number - 1); loopErr != nil {
log.Error("Failed to fetch L1 block header", "lastest", number-1, "err", loopErr)
}
})
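
The hunk above switches the gas oracle from a confirmation-depth lookup to the `latest` tag, then works on `latest - 1` so a reorged tip does not churn the stored base fee. A minimal sketch of that pattern, assuming an `*ethclient.Client` — this is not the repo's actual helper, whose real version lives in `rollup/internal/utils`:
```go
// Illustrative sketch only.
package gasoracle

import (
	"context"
	"math/big"

	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

// latestStableHeader fetches the chain head via the "latest" tag, then returns
// the header at head-1: the tip's parent is far less likely to be reorged
// away, while base-fee updates still lag by only one block.
func latestStableHeader(ctx context.Context, l1client *ethclient.Client) (*types.Header, error) {
	head, err := l1client.BlockNumber(ctx)
	if err != nil {
		return nil, err
	}
	return l1client.HeaderByNumber(ctx, new(big.Int).SetUint64(head-1))
}
```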


@@ -0,0 +1,7 @@
package main
import "scroll-tech/rollup/cmd/gas_oracle/app"
func main() {
app.Run()
}


@@ -11,7 +11,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/rollup/internal/config"
)
// MockApp manages the mock app-test clients.
@@ -22,29 +22,29 @@ type MockApp struct {
mockApps map[utils.MockAppName]docker.AppAPI
originFile string
bridgeFile string
rollupFile string
args []string
}
// NewBridgeApp return a new bridgeApp manager, name mush be one them.
func NewBridgeApp(base *docker.App, file string) *MockApp {
// NewRollupApp returns a new rollupApp manager; name must be one of them.
func NewRollupApp(base *docker.App, file string) *MockApp {
bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
bridgeApp := &MockApp{
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", base.Timestamp)
rollupApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
originFile: file,
bridgeFile: bridgeFile,
args: []string{"--log.debug", "--config", bridgeFile},
rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile},
}
if err := bridgeApp.MockConfig(true); err != nil {
if err := rollupApp.MockConfig(true); err != nil {
panic(err)
}
return bridgeApp
return rollupApp
}
// RunApp run bridge-test child process by multi parameters.
// RunApp runs the rollup-test child process with the given parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
if !(name == utils.EventWatcherApp ||
name == utils.GasOracleApp ||
@@ -72,16 +72,16 @@ func (b *MockApp) WaitExit() {
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
}
// Free stop and release bridge mocked apps.
// Free stops and releases the rollup mocked apps.
func (b *MockApp) Free() {
b.WaitExit()
_ = os.Remove(b.bridgeFile)
_ = os.Remove(b.rollupFile)
}
// MockConfig creates a new bridge config.
// MockConfig creates a new rollup config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin bridge config file.
// Load origin rollup config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
@@ -97,10 +97,10 @@ func (b *MockApp) MockConfig(store bool) error {
if !store {
return nil
}
// Store changed bridge config into a temp file.
// Store changed rollup config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.bridgeFile, data, 0600)
return os.WriteFile(b.rollupFile, data, 0600)
}


@@ -17,10 +17,10 @@ import (
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
butils "scroll-tech/rollup/internal/utils"
)
var app *cli.App


@@ -0,0 +1,7 @@
package main
import "scroll-tech/rollup/cmd/rollup_relayer/app"
func main() {
app.Run()
}


@@ -65,7 +65,8 @@
"gas_cost_increase_multiplier": 1.2
},
"chunk_proposer_config": {
"max_tx_num_per_chunk": 1123,
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,


@@ -1,4 +1,4 @@
module scroll-tech/bridge
module scroll-tech/rollup
go 1.19


@@ -18,7 +18,7 @@ func TestConfig(t *testing.T) {
data, err := json.Marshal(cfg)
assert.NoError(t, err)
tmpJSON := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
tmpJSON := fmt.Sprintf("/tmp/%d_rollup_config.json", time.Now().Nanosecond())
defer func() {
if _, err = os.Stat(tmpJSON); err == nil {
assert.NoError(t, os.Remove(tmpJSON))


@@ -28,6 +28,7 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
MaxTxNumPerChunk uint64 `json:"max_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
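
The new `MaxBlockNumPerChunk` field pairs with the `max_block_num_per_chunk` key added to the sample config earlier. A minimal decoding sketch using only the field and JSON tag names visible in the diff — everything around them is assumed:
```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Field and JSON tag names come from the ChunkProposerConfig diff above;
// the rest of this sketch is illustrative.
type chunkProposerConfig struct {
	MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
	MaxTxNumPerChunk    uint64 `json:"max_tx_num_per_chunk"`
}

func main() {
	raw := []byte(`{"max_block_num_per_chunk": 100, "max_tx_num_per_chunk": 100}`)
	var cfg chunkProposerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	// A chunk closes when either cap is reached, whichever comes first.
	fmt.Println(cfg.MaxBlockNumPerChunk, cfg.MaxTxNumPerChunk)
}
```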


@@ -14,10 +14,10 @@ import (
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
)
// Layer1Relayer is responsible for
@@ -80,7 +80,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
r.metrics.rollupL1RelayerGasPriceOraclerRunTotal.Inc()
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
@@ -125,7 +125,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
r.lastGasPrice = block.BaseFee
r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
r.metrics.rollupL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
}
}
@@ -137,7 +137,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case <-ctx.Done():
return
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
r.metrics.rollupL1GasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())


@@ -0,0 +1,54 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l1RelayerMetrics struct {
rollupL1RelayedMsgsTotal prometheus.Counter
rollupL1RelayedMsgsFailureTotal prometheus.Counter
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLastGasPrice prometheus.Gauge
rollupL1MsgsRelayedConfirmedTotal prometheus.Counter
rollupL1GasOraclerConfirmedTotal prometheus.Counter
}
var (
initL1RelayerMetricOnce sync.Once
l1RelayerMetric *l1RelayerMetrics
)
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
initL1RelayerMetricOnce.Do(func() {
l1RelayerMetric = &l1RelayerMetrics{
rollupL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_msg_relayed_total",
Help: "The total number of the l1 relayed message.",
}),
rollupL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_msg_relayed_failure_total",
Help: "The total number of the l1 relayed failure message.",
}),
rollupL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_relayed_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
rollupL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_gas_price_oracler_total",
Help: "The total number of layer1 gas price oracler run total",
}),
rollupL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_layer1_gas_price_latest_gas_price",
Help: "The latest gas price of rollup relayer l1",
}),
rollupL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_gas_oracler_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
}
})
return l1RelayerMetric
}
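
The `sync.Once` guard is what makes this constructor safe to call more than once: `promauto.With(reg)` panics if a metric name is registered twice on the same registry, so all callers share a single metric set. An illustrative usage sketch, assumed to sit in the same `relayer` package as the constructor above since the struct fields are unexported:
```go
package relayer

import "github.com/prometheus/client_golang/prometheus"

func exampleMetricsReuse() {
	reg := prometheus.NewRegistry()
	m1 := initL1RelayerMetrics(reg)
	m2 := initL1RelayerMetrics(reg) // sync.Once: same instance, nothing re-registered
	if m1 == m2 {
		m1.rollupL1RelayerGasPriceOraclerRunTotal.Inc()
	}
}
```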


@@ -18,8 +18,8 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
)
func setupL1RelayerDB(t *testing.T) *gorm.DB {


@@ -20,10 +20,10 @@ import (
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
)
// Layer2Relayer is responsible for
@@ -264,7 +264,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if batch == nil || err != nil {
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
@@ -302,7 +302,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
return
}
r.lastGasPrice = suggestGasPriceUint64
r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
r.metrics.rollupL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
@@ -317,7 +317,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
for _, batch := range pendingBatches {
r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
// get current header and parent header.
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
if err != nil {
@@ -400,7 +400,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
return
}
r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
r.processingCommitment.Store(txID, batch.Hash)
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
@@ -424,7 +424,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
}
r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()
r.metrics.rollupL2RelayerProcessCommittedBatchesTotal.Inc()
batch := batches[0]
hash := batch.Hash
@@ -435,17 +435,17 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
// Check batch status before send `finalizeBatchWithProof` tx.
//batchStatus, err := r.getBatchStatusByIndex(batch.Index)
//if err != nil {
// r.metrics.bridgeL2ChainMonitorLatestFailedCall.Inc()
// r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
// log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
// return
//}
//if !batchStatus {
// r.metrics.bridgeL2ChainMonitorLatestFailedBatchStatus.Inc()
// r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
// log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
// return
//}
@@ -524,7 +524,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
"tx hash", finalizeTxHash.String(), "err", err)
}
r.processingFinalization.Store(txID, hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
case types.ProvingTaskFailed:
// We were unable to prove this batch. There are two possibilities:
@@ -592,7 +592,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
r.metrics.rollupL2BatchesCommittedConfirmedTotal.Inc()
r.processingCommitment.Delete(confirmation.ID)
}
@@ -614,7 +614,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
r.metrics.rollupL2BatchesFinalizedConfirmedTotal.Inc()
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
@@ -630,7 +630,7 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
case confirmation := <-r.finalizeSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
r.metrics.rollupL2BatchesGasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())


@@ -0,0 +1,84 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l2RelayerMetrics struct {
rollupL2RelayerProcessPendingBatchTotal prometheus.Counter
rollupL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
rollupL2RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL2RelayerLastGasPrice prometheus.Gauge
rollupL2RelayerProcessCommittedBatchesTotal prometheus.Counter
rollupL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
rollupL2BatchesCommittedConfirmedTotal prometheus.Counter
rollupL2BatchesFinalizedConfirmedTotal prometheus.Counter
rollupL2BatchesGasOraclerConfirmedTotal prometheus.Counter
rollupL2ChainMonitorLatestFailedCall prometheus.Counter
rollupL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
}
var (
initL2RelayerMetricOnce sync.Once
l2RelayerMetric *l2RelayerMetrics
)
func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
initL2RelayerMetricOnce.Do(func() {
l2RelayerMetric = &l2RelayerMetrics{
rollupL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_pending_batch_total",
Help: "The total number of layer2 process pending batch",
}),
rollupL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_pending_batch_success_total",
Help: "The total number of layer2 process pending success batch",
}),
rollupL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_gas_price_oracler_total",
Help: "The total number of layer2 gas price oracler run total",
}),
rollupL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_layer2_gas_price_latest_gas_price",
Help: "The latest gas price of rollup relayer l2",
}),
rollupL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_committed_batches_total",
Help: "The total number of layer2 process committed batches run total",
}),
rollupL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_committed_batches_finalized_total",
Help: "The total number of layer2 process committed batches finalized total",
}),
rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_committed_batches_finalized_success_total",
Help: "The total number of layer2 process committed batches finalized success total",
}),
rollupL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_committed_batches_confirmed_total",
Help: "The total number of layer2 process committed batches confirmed total",
}),
rollupL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_finalized_batches_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
rollupL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_gras_oracler_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
rollupL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_chain_monitor_latest_failed_batch_call",
Help: "The total number of failed call chain_monitor api",
}),
rollupL2ChainMonitorLatestFailedBatchStatus: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_chain_monitor_latest_failed_batch_status",
Help: "The total number of failed batch status get from chain_monitor",
}),
}
})
return l2RelayerMetric
}


@@ -22,8 +22,8 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
)
func setupL2RelayerDB(t *testing.T) *gorm.DB {


@@ -13,7 +13,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/rollup/internal/config"
)
var (


@@ -33,59 +33,59 @@ func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
initSenderMetricOnce.Do(func() {
sm = &senderMetrics{
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_total",
Name: "rollup_sender_send_transaction_total",
Help: "The total number of sending transaction.",
}, []string{"service", "name"}),
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_send_transaction_full_tx_failure_total",
Name: "rollup_sender_send_transaction_full_tx_failure_total",
Help: "The total number of sending transaction failure for full size tx.",
}, []string{"service", "name"}),
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
Name: "rollup_sender_send_transaction_repeat_transaction_failure_total",
Help: "The total number of sending transaction failure for repeat transaction.",
}, []string{"service", "name"}),
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_get_fee_failure_total",
Name: "rollup_sender_send_transaction_get_fee_failure_total",
Help: "The total number of sending transaction failure for getting fee.",
}, []string{"service", "name"}),
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_send_tx_failure_total",
Name: "rollup_sender_send_transaction_send_tx_failure_total",
Help: "The total number of sending transaction failure for sending tx.",
}, []string{"service", "name"}),
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
Name: "rollup_sender_send_transaction_resubmit_send_transaction_total",
Help: "The total number of resubmit transaction.",
}, []string{"service", "name"}),
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_pending_tx_count",
Name: "rollup_sender_pending_tx_count",
Help: "The pending tx count in the sender.",
}, []string{"service", "name"}),
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_fee_cap",
Name: "rollup_sender_gas_fee_cap",
Help: "The gas fee of current transaction.",
}, []string{"service", "name"}),
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_tip_cap",
Name: "rollup_sender_gas_tip_cap",
Help: "The gas tip of current transaction.",
}, []string{"service", "name"}),
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_price_cap",
Name: "rollup_sender_gas_price_cap",
Help: "The gas price of current transaction.",
}, []string{"service", "name"}),
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_limit",
Name: "rollup_sender_gas_limit",
Help: "The gas limit of current transaction.",
}, []string{"service", "name"}),
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_nonce",
Name: "rollup_sender_nonce",
Help: "The nonce of current transaction.",
}, []string{"service", "name"}),
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_check_pending_transaction_total",
Name: "rollup_sender_check_pending_transaction_total",
Help: "The total number of check pending transaction.",
}, []string{"service", "name"}),
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_check_balancer_total",
Name: "rollup_sender_check_balancer_total",
Help: "The total number of check balancer.",
}, []string{"service", "name"}),
}


@@ -18,8 +18,8 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/utils"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/utils"
)
const (


@@ -19,7 +19,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/bridge/internal/config"
"scroll-tech/rollup/internal/config"
)
const TXBatch = 50


@@ -12,8 +12,8 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
)
// BatchProposer proposes batches based on available unbatched chunks.
@@ -63,39 +63,39 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_circle_total",
Name: "rollup_propose_batch_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_failure_circle_total",
Name: "rollup_propose_batch_failure_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_update_info_total",
Name: "rollup_propose_batch_update_info_total",
Help: "Total number of propose batch update info total.",
}),
proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_update_info_failure_total",
Name: "rollup_propose_batch_update_info_failure_total",
Help: "Total number of propose batch update info failure total.",
}),
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_total_l1_commit_gas",
Name: "rollup_propose_batch_total_l1_commit_gas",
Help: "The total l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_total_l1_call_data_size",
Name: "rollup_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_chunks_number",
Name: "rollup_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_block_timeout_reached_total",
Name: "rollup_propose_batch_first_block_timeout_reached_total",
Help: "Total times of batch's first block timeout reached",
}),
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Name: "rollup_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
}),
}
@@ -122,7 +122,7 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk, batchMeta *ty
if numChunks <= 0 {
return nil
}
chunks, err := p.dbChunksToBridgeChunks(dbChunks)
chunks, err := p.dbChunksToRollupChunks(dbChunks)
if err != nil {
return err
}
@@ -154,7 +154,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
return nil, nil, err
}
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch)+1)
// select at most p.maxChunkNumPerBatch chunks
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch))
if err != nil {
return nil, nil, err
}
@@ -207,8 +208,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
@@ -234,8 +234,6 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
}
log.Debug("breaking limit condition in batching",
"currentTotalChunks", totalChunks,
"maxChunkNumPerBatch", p.maxChunkNumPerBatch,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
@@ -249,12 +247,20 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
}
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"start block number", dbChunks[0].StartBlockNumber,
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec ||
totalChunks == p.maxChunkNumPerBatch {
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"start block number", dbChunks[0].StartBlockNumber,
"start block timestamp", dbChunks[0].StartBlockTime,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of chunks in batch",
"chunk count", totalChunks,
)
}
batchMeta.TotalL1CommitGas = totalL1CommitGas
batchMeta.TotalL1CommitCalldataSize = totalL1CommitCalldataSize
p.batchFirstBlockTimeoutReached.Inc()
@@ -269,7 +275,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
return nil, nil, nil
}
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
func (p *BatchProposer) dbChunksToRollupChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
chunks := make([]*types.Chunk, len(dbChunks))
for i, c := range dbChunks {
wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
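
Two coordinated changes in this hunk: the query now fetches at most `maxChunkNumPerBatch` chunks (previously it fetched one extra and detected overflow via `totalChunks > max`), and reaching the maximum becomes an explicit seal condition alongside the first-chunk timeout. A simplified sketch of the new condition, with the proposer's fields passed as plain values purely for illustration:
```go
package main

import (
	"fmt"
	"time"
)

// shouldSealBatch mirrors the new condition in the hunk above: seal when the
// oldest chunk has waited past the timeout, or when the configured maximum
// number of chunks has accumulated.
func shouldSealBatch(firstChunkStartTime, batchTimeoutSec, totalChunks, maxChunkNumPerBatch uint64) bool {
	timedOut := firstChunkStartTime+batchTimeoutSec < uint64(time.Now().Unix())
	full := totalChunks == maxChunkNumPerBatch
	return timedOut || full
}

func main() {
	// A batch holding 15 chunks against a cap of 15 seals immediately.
	fmt.Println(shouldSealBatch(uint64(time.Now().Unix()), 300, 15, 15)) // true
}
```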


@@ -9,8 +9,8 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
)
// TODO: Add unit tests that the limits are enforced correctly.
@@ -23,6 +23,7 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,


@@ -14,14 +14,10 @@ import (
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
)
// maxNumBlockPerChunk is the maximum number of blocks we allow per chunk.
// Normally we will pack much fewer blocks because of other limits.
const maxNumBlockPerChunk int = 100
// chunkRowConsumption is map(sub-circuit name => sub-circuit row count)
type chunkRowConsumption map[string]uint64
@@ -55,6 +51,7 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxBlockNumPerChunk uint64
maxTxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
@@ -90,6 +87,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxBlockNumPerChunk: cfg.MaxBlockNumPerChunk,
maxTxNumPerChunk: cfg.MaxTxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
@@ -98,51 +96,51 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_circle_total",
Name: "rollup_propose_chunk_circle_total",
Help: "Total number of propose chunk total.",
}),
proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_failure_circle_total",
Name: "rollup_propose_chunk_failure_circle_total",
Help: "Total number of propose chunk failure total.",
}),
proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_update_info_total",
Name: "rollup_propose_chunk_update_info_total",
Help: "Total number of propose chunk update info total.",
}),
proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_update_info_failure_total",
Name: "rollup_propose_chunk_update_info_failure_total",
Help: "Total number of propose chunk update info failure total.",
}),
chunkTxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_tx_num",
Name: "rollup_propose_chunk_tx_num",
Help: "The chunk tx num",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
Name: "rollup_propose_chunk_estimate_l1_commit_gas",
Help: "The chunk estimate l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
Name: "rollup_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_total_tx_gas_used",
Name: "rollup_propose_chunk_total_tx_gas_used",
Help: "The total tx gas used",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_max_tx_consumption",
Name: "rollup_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
}),
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_chunk_block_number",
Name: "rollup_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Name: "rollup_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Name: "rollup_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
}
@@ -191,7 +189,8 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, err
}
blocks, err := p.l2BlockOrm.GetL2WrappedBlocksGEHeight(p.ctx, unchunkedBlockHeight, maxNumBlockPerChunk)
// select at most p.maxBlockNumPerChunk blocks
blocks, err := p.l2BlockOrm.GetL2WrappedBlocksGEHeight(p.ctx, unchunkedBlockHeight, int(p.maxBlockNumPerChunk))
if err != nil {
return nil, err
}
@@ -293,12 +292,21 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
}
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"block number", blocks[0].Header.Number,
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec ||
uint64(len(chunk.Blocks)) == p.maxBlockNumPerChunk {
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"block number", chunk.Blocks[0].Header.Number,
"block timestamp", chunk.Blocks[0].Header.Time,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of blocks in chunk",
"start block number", chunk.Blocks[0].Header.Number,
"block count", len(chunk.Blocks),
)
}
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkTxNum.Set(float64(totalTxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
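
The chunk proposer mirrors the batch-side change: the hard-coded `maxNumBlockPerChunk` constant gives way to the configurable `max_block_num_per_chunk`, and hitting the cap becomes a seal condition next to the first-block timeout. An illustrative sketch of that rule — not the repo's helper:
```go
// Package proposer is an illustrative sketch, not the repo's package.
package proposer

import "time"

// ShouldSealChunk mirrors the chunk-side condition above: seal on first-block
// timeout, or once the configured max_block_num_per_chunk blocks accumulate.
func ShouldSealChunk(firstBlockTime, chunkTimeoutSec, blockCount, maxBlockNumPerChunk uint64) bool {
	return firstBlockTime+chunkTimeoutSec < uint64(time.Now().Unix()) ||
		blockCount == maxBlockNumPerChunk
}
```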


@@ -9,8 +9,8 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
)
// TODO: Add unit tests that the limits are enforced correctly.
@@ -23,6 +23,7 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
@@ -53,6 +54,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,


@@ -17,9 +17,9 @@ import (
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
type rollupEvent struct {


@@ -26,31 +26,31 @@ func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
initL1WatcherMetricOnce.Do(func() {
l1WatcherMetric = &l1WatcherMetrics{
l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_header_total",
Name: "rollup_l1_watcher_fetch_block_header_total",
Help: "The total number of l1 watcher fetch block header total",
}),
l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
Name: "rollup_l1_watcher_fetch_block_header_processed_block_height",
Help: "The current processed block height of l1 watcher fetch block header",
}),
l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_total",
Name: "rollup_l1_watcher_fetch_block_contract_event_total",
Help: "The total number of l1 watcher fetch contract event total",
}),
l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
Name: "rollup_l1_watcher_fetch_block_contract_event_success_total",
Help: "The total number of l1 watcher fetch contract event success total",
}),
l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
Name: "rollup_l1_watcher_fetch_block_contract_event_processed_block_height",
Help: "The current processed block height of l1 watcher fetch contract event",
}),
l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
Name: "rollup_l1_watcher_fetch_block_contract_event_sent_event_total",
Help: "The current processed block height of l1 watcher fetch contract sent event",
}),
l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
Name: "rollup_l1_watcher_fetch_block_contract_event_rollup_event_total",
Help: "The current processed block height of l1 watcher fetch contract rollup event",
}),
}


@@ -20,9 +20,9 @@ import (
"scroll-tech/common/database"
commonTypes "scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {


@@ -19,9 +19,9 @@ import (
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
// L2WatcherClient provides APIs for subscribing to various events from l2geth
@@ -116,7 +116,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
return
}
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
w.metrics.rollupL2BlocksFetchedGap.Set(float64(blockHeight - to))
}
}
@@ -246,7 +246,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
relayedMessageCount := int64(len(relayedMessageEvents))
w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
w.metrics.rollupL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
// Update relayed messages first to make sure we don't forget to update submitted messages.

View File

@@ -12,8 +12,8 @@ type l2WatcherMetrics struct {
fetchRunningMissingBlocksHeight prometheus.Gauge
fetchContractEventTotal prometheus.Counter
fetchContractEventHeight prometheus.Gauge
bridgeL2MsgsRelayedEventsTotal prometheus.Counter
bridgeL2BlocksFetchedGap prometheus.Gauge
rollupL2MsgsRelayedEventsTotal prometheus.Counter
rollupL2BlocksFetchedGap prometheus.Gauge
}
var (
@@ -25,27 +25,27 @@ func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
initL2WatcherMetricOnce.Do(func() {
l2WatcherMetric = &l2WatcherMetrics{
fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
Name: "rollup_l2_watcher_fetch_running_missing_blocks_total",
Help: "The total number of l2 watcher fetch running missing blocks",
}),
fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
Name: "rollup_l2_watcher_fetch_running_missing_blocks_height",
Help: "The total number of l2 watcher fetch running missing blocks height",
}),
fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_fetch_contract_events_total",
Name: "rollup_l2_watcher_fetch_contract_events_total",
Help: "The total number of l2 watcher fetch contract events",
}),
fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_fetch_contract_height",
Name: "rollup_l2_watcher_fetch_contract_height",
Help: "The total number of l2 watcher fetch contract height",
}),
bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_msg_relayed_events_total",
rollupL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_msg_relayed_events_total",
Help: "The total number of l2 watcher msg relayed event",
}),
bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_blocks_fetched_gap",
rollupL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_blocks_fetched_gap",
Help: "The gap of l2 fetch",
}),
}


@@ -24,11 +24,11 @@ import (
"scroll-tech/common/database"
cutils "scroll-tech/common/utils"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
"scroll-tech/rollup/mock_bridge"
)
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {


@@ -15,7 +15,7 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/config"
"scroll-tech/rollup/internal/config"
)
var (


@@ -70,19 +70,30 @@ func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}
return l1Blocks, nil
}
// InsertL1Blocks batch insert l1 blocks
// InsertL1Blocks batch inserts l1 blocks.
// If there's a block number conflict (e.g., due to reorg), soft deletes the existing block and inserts the new one.
func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
if len(blocks) == 0 {
return nil
}
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
return o.db.Transaction(func(tx *gorm.DB) error {
var blockNumbers []uint64
for _, block := range blocks {
blockNumbers = append(blockNumbers, block.Number)
}
if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
}
return nil
db := tx.WithContext(ctx)
db = db.Model(&L1Block{})
if err := db.Where("number IN (?)", blockNumbers).Delete(&L1Block{}).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: soft deleting blocks failed, block numbers: %v, error: %w", blockNumbers, err)
}
if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
}
return nil
})
}
// UpdateL1GasOracleStatusAndOracleTxHash update l1 gas oracle status and oracle tx hash
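
The rewritten `InsertL1Blocks` wraps the delete and insert in one transaction and leans on gorm's soft delete for reorg handling. A sketch of the assumed model shape that makes `Delete` a soft delete — the real `L1Block` model lives in `rollup/internal/orm` and may differ:
```go
// Illustrative model shape only.
package orm

import "gorm.io/gorm"

// Because DeletedAt has type gorm.DeletedAt, tx.Delete(&L1Block{}) issues an
// UPDATE that sets deleted_at instead of removing the row, and gorm's default
// scope then hides soft-deleted rows from later SELECTs. That is what lets a
// reorged block be replaced without losing its audit trail.
type L1Block struct {
	Number    uint64
	Hash      string
	DeletedAt gorm.DeletedAt
}
```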


@@ -89,6 +89,51 @@ func tearDownEnv(t *testing.T) {
base.Free()
}
func TestL1BlockOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
l1BlockOrm := NewL1Block(db)
// mock blocks
block1 := L1Block{Number: 1, Hash: "hash1"}
block2 := L1Block{Number: 2, Hash: "hash2"}
block2AfterReorg := L1Block{Number: 2, Hash: "hash3"}
err = l1BlockOrm.InsertL1Blocks(context.Background(), []L1Block{block1, block2})
assert.NoError(t, err)
height, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), height)
blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{})
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, "hash1", blocks[0].Hash)
assert.Equal(t, "hash2", blocks[1].Hash)
// reorg handling: insert another block with same height and different hash
err = l1BlockOrm.InsertL1Blocks(context.Background(), []L1Block{block2AfterReorg})
assert.NoError(t, err)
blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{})
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, "hash1", blocks[0].Hash)
assert.Equal(t, "hash3", blocks[1].Hash)
err = l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(context.Background(), "hash1", types.GasOracleImported, "txhash1")
assert.NoError(t, err)
updatedBlocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{})
assert.NoError(t, err)
assert.Len(t, updatedBlocks, 2)
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBlocks[0].GasOracleStatus))
assert.Equal(t, "txhash1", updatedBlocks[0].OracleTxHash)
}
func TestL2BlockOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)


@@ -9,7 +9,7 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
bridgeAbi "scroll-tech/bridge/abi"
bridgeAbi "scroll-tech/rollup/abi"
)
// Keccak2 computes the keccak256 of the concatenation of two bytes32 values

Some files were not shown because too many files have changed in this diff.