mirror of https://github.com/scroll-tech/scroll.git
synced 2026-01-11 23:18:07 -05:00

Compare commits: v4.3.41-te...revert_mon
3 commits: e34cfbbd49, cab8d4a903, fc8ea35ad0
.github/pull_request_template.md (vendored, 2 changes)

@@ -20,7 +20,7 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits

 ### Deployment tag versioning

-Has `tag` in `common/version.go` been updated or have you added `bump-version` label to this PR?
+Has `tag` in `common/version.go` been updated?

 - [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
 - [ ] Yes
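For reference, the conventional-commits rule this template points to uses PR titles of the form `type(scope): summary`, for example `fix(coordinator): handle empty proof task list` (an illustrative title, not one taken from this repository).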
.github/workflows/rollup.yml → .github/workflows/bridge.yml (vendored)

@@ -1,4 +1,4 @@
-name: Rollup
+name: Bridge

 on:
   push:
@@ -8,11 +8,10 @@ on:
       - develop
       - alpha
     paths:
-      - 'rollup/**'
+      - 'bridge/**'
       - 'common/**'
-      - '!common/version/version.go'
       - 'database/**'
-      - '.github/workflows/rollup.yml'
+      - '.github/workflows/bridge.yml'
   pull_request:
     types:
       - opened
@@ -20,11 +19,10 @@ on:
       - synchronize
       - ready_for_review
     paths:
-      - 'rollup/**'
+      - 'bridge/**'
       - 'common/**'
-      - '!common/version/version.go'
      - 'database/**'
-      - '.github/workflows/rollup.yml'
+      - '.github/workflows/bridge.yml'

 jobs:
   check:
@@ -46,7 +44,7 @@ jobs:
       - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
       - name: Lint
-        working-directory: 'rollup'
+        working-directory: 'bridge'
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make mock_abi
@@ -64,14 +62,14 @@ jobs:
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
      - name: Run goimports lint
-        run: goimports -local scroll-tech/rollup/ -w .
-        working-directory: 'rollup'
+        run: goimports -local scroll-tech/bridge/ -w .
+        working-directory: 'bridge'
      - name: Run go mod tidy
        run: go mod tidy
-        working-directory: 'rollup'
+        working-directory: 'bridge'
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
-        working-directory: 'rollup'
+        working-directory: 'bridge'
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
@@ -97,13 +95,13 @@ jobs:
      - name: Build prerequisites
        run: |
          make dev_docker
-          make -C rollup mock_abi
-      - name: Build rollup binaries
-        working-directory: 'rollup'
+          make -C bridge mock_abi
+      - name: Build bridge binaries
+        working-directory: 'bridge'
        run: |
-          make rollup_bins
-      - name: Test rollup packages
-        working-directory: 'rollup'
+          make bridge_bins
+      - name: Test bridge packages
+        working-directory: 'bridge'
        run: |
          go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
      - name: Upload coverage reports to Codecov
@@ -111,7 +109,7 @@
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
-          flags: rollup
+          flags: bridge
      # docker-build:
      #   if: github.event.pull_request.draft == false
      #   runs-on: ubuntu-latest
.github/workflows/bump_version.yml (vendored, 6 changes)

@@ -1,18 +1,16 @@
-name: Bump version
+name: Bump Version

 on:
   pull_request:
-    branches: [ develop ]
+    branches: [develop]
     types:
       - opened
       - reopened
       - synchronize
       - ready_for_review
-      - labeled

 jobs:
   try-to-bump:
-    if: contains(github.event.pull_request.labels.*.name, 'bump-version')
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
.github/workflows/common.yml (vendored, 2 changes)

@@ -9,7 +9,6 @@ on:
       - alpha
     paths:
       - 'common/**'
-      - '!common/version/version.go'
       - '.github/workflows/common.yml'
   pull_request:
     types:
@@ -19,7 +18,6 @@ on:
       - ready_for_review
     paths:
       - 'common/**'
-      - '!common/version/version.go'
       - '.github/workflows/common.yml'

 jobs:
.github/workflows/coordinator.yml (vendored, 2 changes)

@@ -10,7 +10,6 @@ on:
     paths:
       - 'coordinator/**'
       - 'common/**'
-      - '!common/version/version.go'
       - 'database/**'
       - '.github/workflows/coordinator.yml'
   pull_request:
@@ -22,7 +21,6 @@ on:
     paths:
       - 'coordinator/**'
       - 'common/**'
-      - '!common/version/version.go'
       - 'database/**'
       - '.github/workflows/coordinator.yml'
.github/workflows/database.yml (vendored, 2 changes)

@@ -10,7 +10,6 @@ on:
     paths:
       - 'database/**'
       - 'common/**'
-      - '!common/version/version.go'
       - '.github/workflows/database.yml'
   pull_request:
     types:
@@ -21,7 +20,6 @@ on:
     paths:
       - 'database/**'
       - 'common/**'
-      - '!common/version/version.go'
       - '.github/workflows/database.yml'

 jobs:
@@ -48,6 +48,27 @@
         tags: scrolltech/gas-oracle:${{github.ref_name}}
         # cache-from: type=gha,scope=${{ github.workflow }}
         # cache-to: type=gha,scope=${{ github.workflow }}
+  msg_relayer:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push msg_relayer docker
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./build/dockerfiles/msg_relayer.Dockerfile
+          push: true
+          tags: scrolltech/msg-relayer:${{github.ref_name}}
+          # cache-from: type=gha,scope=${{ github.workflow }}
+          # cache-to: type=gha,scope=${{ github.workflow }}
   rollup_relayer:
     runs-on: ubuntu-latest
     steps:
@@ -111,7 +132,7 @@
         tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
         # cache-from: type=gha,scope=${{ github.workflow }}
         # cache-to: type=gha,scope=${{ github.workflow }}
-  coordinator-api:
+  coordinator:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -127,12 +148,12 @@
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./build/dockerfiles/coordinator-api.Dockerfile
+          file: ./build/dockerfiles/coordinator.Dockerfile
           push: true
-          tags: scrolltech/coordinator-api:${{github.ref_name}}
+          tags: scrolltech/coordinator:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
-  coordinator-cron:
+  prover-stats-api:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -144,12 +165,12 @@
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push coordinator docker
+      - name: Build and push prover-stats-api docker
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./build/dockerfiles/coordinator-cron.Dockerfile
+          file: ./build/dockerfiles/prover-stats-api.Dockerfile
           push: true
-          tags: scrolltech/coordinator-cron:${{github.ref_name}}
+          tags: scrolltech/prover-stats-api:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
@@ -36,7 +36,7 @@ jobs:
       - name: Build prerequisites
         run: |
           make dev_docker
-          make -C rollup mock_abi
+          make -C bridge mock_abi
           make -C common/bytecode all
       - name: Run integration tests
         run: |
.github/workflows/prover_stats_api.yml (vendored, new file, 80 lines added)

@@ -0,0 +1,80 @@
name: ProverStatsAPI

on:
  push:
    branches:
      - main
      - staging
      - develop
      - alpha
    paths:
      - 'prover-stats-api/**'
      - '.github/workflows/prover_stats_api.yml'
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review
    paths:
      - 'prover-stats-api/**'
      - '.github/workflows/prover_stats_api.yml'

defaults:
  run:
    working-directory: 'prover-stats-api'

jobs:
  check:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Lint
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make lint
  test:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Test
        run: |
          make test
      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prover-stats-api
  goimports-lint:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
      - run: goimports -local scroll-tech/prover-stats-api/ -w .
      - run: go mod tidy
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
          fi
.gitignore (vendored, 17 changes)

@@ -1,22 +1,9 @@
 # Asset files
+.idea
 assets/params*
 assets/seed

 # Built binaries
-build/bin
-
 coverage.txt
+build/bin
 *.integration.txt
-
-# Visual Studio Code
-.vscode
-
-# IntelliJ
-.idea
-
-# MacOS
-.DS_Store
-
-# misc
-sftp-config.json
-*~
.gitmodules (vendored, 3 changes)

@@ -1,6 +1,9 @@
 [submodule "l2geth"]
 	path = l2geth
 	url = git@github.com:scroll-tech/go-ethereum.git
+[submodule "rpc-gateway"]
+	path = rpc-gateway
+	url = git@github.com:scroll-tech/rpc-gateway.git
 [submodule "contracts/lib/ds-test"]
 	path = contracts/lib/ds-test
 	url = https://github.com/dapphub/ds-test
CODE_OF_CONDUCT.md (deleted)

@@ -1,128 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
contributor@scroll.io.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
CONTRIBUTING.md (deleted)

@@ -1,42 +0,0 @@
## Contributing

[fork]: /fork
[pr]: /compare
[style]: https://standardjs.com/
[code-of-conduct]: CODE_OF_CONDUCT.md

Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for keeping it great.

Please note that this project is released with a [Contributor Code of Conduct][code-of-conduct]. By participating in this project you agree to abide by its terms.

## Contribute to Scroll

Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing dev tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits, please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.

## Issues and PRs

If you have suggestions for how this project could be improved, or want to report a bug, open an issue! We'd love all and any contributions. If you have questions, too, we'd love to hear them.

We'd also love PRs. If you're thinking of a large PR, we advise opening up an issue first to talk about it, though! Look at the links below if you're not sure how to open a PR.

## Submitting a pull request

1. [Fork][fork] and clone the repository.
1. Create a new branch: `git checkout -b my-branch-name`.
1. Make your change, add tests, and make sure the tests still pass.
1. Push to your fork and [submit a pull request][pr].
1. Pat yourself on the back and wait for your pull request to be reviewed and merged.

Here are a few things you can do that will increase the likelihood of your pull request being accepted:

- Write and update tests.
- Keep your changes as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).

Work in Progress pull requests are also welcome to get feedback early on, or if there is something that blocks you.

## Resources

- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
- [GitHub Help](https://help.github.com)
LICENSE (2 changes)

@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2022-2023 Scroll
+Copyright (c) 2022 Scroll

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
Makefile (8 changes)

@@ -1,6 +1,6 @@
 .PHONY: check update dev_docker build_test_docker run_test_docker clean

-L2GETH_TAG=scroll-v4.3.55
+L2GETH_TAG=scroll-v4.3.34

 help: ## Display this help message
 	@grep -h \
@@ -8,7 +8,7 @@ help: ## Display this help message
 	awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

 lint: ## The code's format and security checks.
-	make -C rollup lint
+	make -C bridge lint
 	make -C common lint
 	make -C coordinator lint
 	make -C database lint
@@ -17,7 +17,7 @@ lint: ## The code's format and security checks.

 update: ## update dependencies
 	go work sync
-	cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
 	cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
 	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
 	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
@@ -25,7 +25,7 @@ update: ## update dependencies
 	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
 	cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
 	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	goimports -local $(PWD)/rollup/ -w .
+	goimports -local $(PWD)/bridge/ -w .
 	goimports -local $(PWD)/bridge-history-api/ -w .
 	goimports -local $(PWD)/common/ -w .
 	goimports -local $(PWD)/coordinator/ -w .
README.md (40 changes)

@@ -1,33 +1,7 @@
 # Scroll Monorepo

-[](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
-[](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml)
-[](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
-[](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
-[](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
-[](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml)
-[](https://codecov.io/gh/scroll-tech/scroll)
-
-<a href="https://scroll.io">Scroll</a> is a zkRollup Layer 2 dedicated to enhancing Ethereum scalability through a bytecode-equivalent [zkEVM](https://github.com/scroll-tech/zkevm-circuits) circuit. This monorepo encompasses essential infrastructure components of the Scroll protocol. It contains the L1 and L2 contracts, the rollup node, the prover client, and the prover coordinator.
-
-## Directory Structure
-
-<pre>
-├── <a href="./bridge-history-api/">bridge-history-api</a>: Bridge history service that collects deposit and withdraw events from both the L1 and L2 chains and generates withdrawal proofs
-├── <a href="./common/">common</a>: Common libraries and types
-├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
-├── <a href="./database">database</a>: Database client and schema definition
-├── <a href="./src">l2geth</a>: Scroll execution node
-├── <a href="./prover">prover</a>: Prover client that runs proof generation for the zkEVM circuit and aggregation circuit
-├── <a href="./rollup">rollup</a>: Rollup-related services
-├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
-└── <a href="./tests">tests</a>: Integration tests
-</pre>
-
-## Contributing
-
-We welcome community contributions to this repository. Before you submit any issues or PRs, please read the [Code of Conduct](CODE_OF_CONDUCT.md) and the [Contribution Guideline](CONTRIBUTING.md).

 ## Prerequisites
 + Go 1.19
 + Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
@@ -41,14 +15,14 @@ docker pull postgres
 make dev_docker
 ```

-## Testing Rollup & Coordinator
+## Testing Bridge & Coordinator

 ### For Non-Apple Silicon (M1/M2) Macs

 Run the tests using the following commands:

 ```bash
-go test -v -race -covermode=atomic scroll-tech/rollup/...
+go test -v -race -covermode=atomic scroll-tech/bridge/...
 go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
 go test -v -race -covermode=atomic scroll-tech/database/...
 go test -v -race -covermode=atomic scroll-tech/common/...
@@ -81,7 +55,7 @@ This command runs a Docker container named `scroll_test_container` from the `scr
 Once the Docker container is running, execute the tests using the following commands:

 ```bash
-go test -v -race -covermode=atomic scroll-tech/rollup/...
+go test -v -race -covermode=atomic scroll-tech/bridge/...
 go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
 go test -v -race -covermode=atomic scroll-tech/database/...
 go test -v -race -covermode=atomic scroll-tech/common/...
@@ -89,10 +63,6 @@ go test -v -race -covermode=atomic scroll-tech/common/...

 ## Testing Contracts

-You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).
+You can find the unit tests in [`<REPO_DIR>/contracts/src/test/`](/contracts/src/test/), and integration tests in [`<REPO_DIR>/contracts/integration-test/`](/contracts/integration-test/).

-See [`contracts`](/contracts) for more details on the contracts.
-
-## License
-
-Scroll Monorepo is licensed under the [MIT](./LICENSE) license.
+For more details on contracts, see [`/contracts`](/contracts).
bridge-history-api/README.md

@@ -1,82 +1 @@
# bridge-history-api

This directory contains the `bridge-history-api` service, which provides REST APIs to query transactions that interact with Scroll's official bridge contracts.

## Instructions

The bridge-history-api contains three distinct components.

### bridgehistoryapi-db-cli

Provides DB init, version, rollback, and status-check services.
```
cd ./bridge-history-api
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli [command]
```

### bridgehistoryapi-cross-msg-fetcher

Fetches transactions from both L1 and L2.
```
cd ./bridge-history-api
make bridgehistoryapi-cross-msg-fetcher
./build/bin/bridgehistoryapi-cross-msg-fetcher
```

### bridgehistoryapi-server

Provides the REST APIs; see the API details below.
```
cd ./bridge-history-api
make bridgehistoryapi-server
./build/bin/bridgehistoryapi-server
```

## APIs provided by bridgehistoryapi-server

Assume `bridgehistoryapi-server` is listening on `https://localhost:8080`; the port can be changed by modifying `config.json`. A usage sketch follows the endpoint list below.

1. `/txs`
```
// @Summary get all txs under given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/txs [get]
```

2. `/txsbyhashes`
```
// @Summary get txs by given tx hashes
// @Accept plain
// @Produce plain
// @Param hashes query string array true "array of hashes list"
// @Success 200
// @Router /api/txsbyhashes [post]
```

3. `/claimable`
```
// @Summary get all claimable txs under given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/claimable [get]
```

4. `/withdraw_root`
```
// @Summary get withdraw_root of given batch index
// @Accept plain
// @Produce plain
// @Param batch_index query string true "batch_index"
// @Success 200
// @Router /api/withdraw_root [get]
```
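As a usage sketch for the endpoints above, here is a minimal Go client for `/api/txs`; the scheme, host, port, and example address are assumptions based on the README's `localhost:8080` setup, not values from this repo:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Build the query string from the documented parameters:
	// address, page_size, and page. The address is a placeholder.
	q := url.Values{}
	q.Set("address", "0x0000000000000000000000000000000000000000")
	q.Set("page_size", "10")
	q.Set("page", "1")

	// Host and port are assumptions taken from the README above.
	resp, err := http.Get("http://localhost:8080/api/txs?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, string(body))
}
```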
@@ -89,6 +89,9 @@ var (
 	// L2FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
 	L2FailedRelayedMessageEventSignature common.Hash

+	// L2ImportBlockEventSignature = keccak256("ImportBlock(bytes32,uint256,uint256,uint256,bytes32)")
+	L2ImportBlockEventSignature common.Hash
+
 	// L2AppendMessageEventSignature = keccak256("AppendMessage(uint256,bytes32)")
 	L2AppendMessageEventSignature common.Hash
 )
@@ -150,6 +153,8 @@ func init() {
 	L2RelayedMessageEventSignature = L2ScrollMessengerABI.Events["RelayedMessage"].ID
 	L2FailedRelayedMessageEventSignature = L2ScrollMessengerABI.Events["FailedRelayedMessage"].ID

+	L2ImportBlockEventSignature = L1BlockContainerABI.Events["ImportBlock"].ID
+
 	L2AppendMessageEventSignature = L2MessageQueueABI.Events["AppendMessage"].ID
 }
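For context on the signatures above: an ABI event ID is the Keccak-256 hash of the event's canonical signature string, which is exactly what the `keccak256(...)` comments describe. A minimal illustration using go-ethereum's `crypto` package:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// The event ID equals keccak256 of the canonical signature,
	// shown here for the RelayedMessage event referenced above.
	id := crypto.Keccak256Hash([]byte("RelayedMessage(bytes32)"))
	fmt.Println(id.Hex())
}
```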
@@ -7,13 +7,11 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/gin-gonic/gin"
-	"github.com/prometheus/client_golang/prometheus"
 	"github.com/urfave/cli/v2"

 	"bridge-history-api/config"
 	"bridge-history-api/internal/controller"
 	"bridge-history-api/internal/route"
-	"bridge-history-api/observability"
 	"bridge-history-api/utils"
 )
@@ -56,18 +54,13 @@ func action(ctx *cli.Context) error {
 	router := gin.Default()
 	controller.InitController(db)

-	registry := prometheus.DefaultRegisterer
-	route.Route(router, cfg, registry)
+	route.Route(router, cfg)

 	go func() {
 		if runServerErr := router.Run(fmt.Sprintf(":%s", port)); runServerErr != nil {
 			log.Crit("run http server failure", "error", runServerErr)
 		}
 	}()

-	observability.Server(ctx, db)
-
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
 	signal.Notify(interrupt, os.Interrupt)
@@ -82,18 +82,6 @@ func action(ctx *cli.Context) error {
 		common.HexToAddress(cfg.L1.WETHGatewayAddr),
 	}

-	if cfg.L1.USDCGatewayAddr != "" {
-		l1AddressList = append(l1AddressList, common.HexToAddress(cfg.L1.USDCGatewayAddr))
-	}
-
-	if cfg.L1.LIDOGatewayAddr != "" {
-		l1AddressList = append(l1AddressList, common.HexToAddress(cfg.L1.LIDOGatewayAddr))
-	}
-
-	if cfg.L2.DAIGatewayAddr != "" {
-		l1AddressList = append(l1AddressList, common.HexToAddress(cfg.L1.DAIGatewayAddr))
-	}
-
 	l2AddressList := []common.Address{
 		common.HexToAddress(cfg.L2.CustomERC20GatewayAddr),
 		common.HexToAddress(cfg.L2.ERC721GatewayAddr),
@@ -104,18 +92,6 @@
 		common.HexToAddress(cfg.L2.WETHGatewayAddr),
 	}

-	if cfg.L2.USDCGatewayAddr != "" {
-		l2AddressList = append(l2AddressList, common.HexToAddress(cfg.L2.USDCGatewayAddr))
-	}
-
-	if cfg.L2.LIDOGatewayAddr != "" {
-		l2AddressList = append(l2AddressList, common.HexToAddress(cfg.L2.LIDOGatewayAddr))
-	}
-
-	if cfg.L2.DAIGatewayAddr != "" {
-		l2AddressList = append(l2AddressList, common.HexToAddress(cfg.L2.DAIGatewayAddr))
-	}
-
 	l1crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, crossmsg.L1ReorgHandling)
 	if err != nil {
 		log.Crit("failed to create l1 cross message fetcher", "error", err)
@@ -1,39 +1,33 @@
 {
   "batchInfoFetcher": {
     "batchIndexStartBlock": 9091265,
-    "ScrollChainAddr": "0x1799c3Df650caB9DFBb228C971016707D8f8721D"
+    "ScrollChainAddr": "0xcD00DB804C819175B381b2B44Aa16A391c8a01D6"
   },
   "l1": {
     "confirmation": 64,
-    "endpoint": "https://rpc.ankr.com/eth",
-    "startHeight": 18310747,
+    "endpoint": "https://rpc.ankr.com/eth_goerli",
+    "startHeight": 9090194,
     "blockTime": 10,
-    "MessengerAddr": "0x7318152B19c3c97c886D5ee6C2525E62ce8e2abA",
-    "ETHGatewayAddr": "0xd165b42d857eae2915625819464a2a1f91E5d0A5",
-    "WETHGatewayAddr": "0xb0255e4C1a919619D1CafBA51021d638c4F71b89",
-    "StandardERC20Gateway": "0x00fEc01A9b975bA37466B4E9006dF2C71BFE0e48",
-    "CustomERC20GatewayAddr": "0xD8874B0E6C3CC43C00B69D60c21Ef452d1159bDe",
-    "ERC721GatewayAddr": "0x131B46649F6882d686a766cb8b68c4cB0ACdeb24",
-    "ERC1155GatewayAddr": "0xCeE721789FAA05c7F4463efB664520656aB7C7d5",
-    "USDCGatewayAddr": "0x37ba659D6CC380D12Fb96567CC52FC8e1DF4E334",
-    "LIDOGatewayAddr": "0x892dDB2899325aBBA1fD00FDA8249B40Cbbc33F9",
-    "DAIGatewayAddr": "0xD8dD7787f89c7E6243AD32E0d0cCf460243C8130"
+    "MessengerAddr": "0x326517Eb8eB1Ce5eaB5b513C2e9A24839b402d90",
+    "ETHGatewayAddr": "0x8305cB7B8448677736095965B63d7431017328fe",
+    "WETHGatewayAddr": "0xe3bA3c60d99a2d9a5f817734bC85353470b23931",
+    "StandardERC20Gateway": "0x16c1079B27eD9c363B7D08aC5Ae937A398972A5C",
+    "CustomERC20GatewayAddr": "0x61f08caD3d6F77801167d3bA8669433701586643",
+    "ERC721GatewayAddr": "0x4A73D25A4C99CB912acaf6C5B5e554f2982201c5",
+    "ERC1155GatewayAddr": "0xa3F5DD3033698c2832C53f3C3Fe6E062F58cD808"
   },
   "l2": {
     "confirmation": 1,
-    "endpoint": "http://mainnet-l2geth-internal-1.mainnet.scroll.tech:8545",
+    "endpoint": "http://staging-l2geth-rpc0.scroll.tech:8545",
     "blockTime": 3,
     "startHeight": 0,
-    "MessengerAddr": "0xda7c91Ed60DACD28Cb97B180108958c9ACC7698a",
-    "ETHGatewayAddr": "0x567671187b5FFbcDFe0B6EcF3e56C05508a31A87",
-    "WETHGatewayAddr": "0x3b03aE2F27d62E0B2b6740CA20Fc07Af4338B791",
-    "StandardERC20Gateway": "0xb00cb1F6f7C43D2EE8C4e2163a6bEA22441A5B7c",
-    "CustomERC20GatewayAddr": "0x63CCb38E9d21A72777b203267F2e4ba5C974fC62",
-    "ERC721GatewayAddr": "0xE2c36a2D8B5528719aE7A42A778b2D08b18d134a",
-    "ERC1155GatewayAddr": "0xfF14870512e42BFb85a9B7bEfDc06e9aB5A37269",
-    "USDCGatewayAddr": "0x97D5799CDC8eE2A7452913d7548c7cEE285719FA",
-    "LIDOGatewayAddr": "0xE9c5C9f67ec7B773fC76440845751F657bb953FF",
-    "DAIGatewayAddr": "0xC5034eB8F682b73F93C9246aa95A8eBbF82793aA"
+    "CustomERC20GatewayAddr": "0x905db21f836749fEeD12de781afc4A5Ab4Dd0d51",
+    "ERC721GatewayAddr": "0xC53D835514780664BCd7eCfcE7c2E5d9554dc41B",
+    "StandardERC20Gateway": "0x90271634BCB020e06ea4840C3f7aa61b8F860651",
+    "MessengerAddr": "0xE8b0956Ac75c65Aa1669e83888DA13afF2E108f4",
+    "ETHGatewayAddr": "0xD5938590D5dD8ce95812D4D515a219C12C551D67",
+    "WETHGatewayAddr": "0xb0aaA582564fade4232a16fdB1383004A6A7247F",
+    "ERC1155GatewayAddr": "0x4f33B1655619c2C0B7C450128Df760B4365Cb549"
   },
   "db": {
     "dsn": "postgres://postgres:1234@localhost:5444/test?sslmode=disable",
@@ -31,9 +31,6 @@ type LayerConfig struct {
 	MessengerAddr        string `json:"MessengerAddr"`
 	ETHGatewayAddr       string `json:"ETHGatewayAddr"`
 	WETHGatewayAddr      string `json:"WETHGatewayAddr"`
-	USDCGatewayAddr      string `json:"USDCGatewayAddr"`
-	LIDOGatewayAddr      string `json:"LIDOGatewayAddr"`
-	DAIGatewayAddr       string `json:"DAIGatewayAddr"`
 	StandardERC20Gateway string `json:"StandardERC20Gateway"`
 	ERC721GatewayAddr    string `json:"ERC721GatewayAddr"`
 	ERC1155GatewayAddr   string `json:"ERC1155GatewayAddr"`
@@ -18,7 +18,7 @@ import (
 // Todo : read from config
 var (
 	// the number of blocks fetch per round
-	fetchLimit = uint64(100)
+	fetchLimit = uint64(3000)
 )

 // FetchAndSave is a function type that fetches events from blockchain and saves them to database
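To show how a per-round cap like `fetchLimit` is typically applied, here is a sketch of range-chunked fetching; `fetchEvents` is a hypothetical callback standing in for the repo's real fetch-and-save logic:

```go
// fetchRange walks [start, latest] in windows of at most limit blocks,
// so each log-filter query against the node stays bounded in size.
func fetchRange(start, latest, limit uint64, fetchEvents func(from, to uint64) error) error {
	for from := start; from <= latest; from += limit {
		to := from + limit - 1
		if to > latest {
			to = latest
		}
		if err := fetchEvents(from, to); err != nil {
			return err
		}
	}
	return nil
}
```

Raising the limit from 100 to 3000 blocks per round reduces round trips at the cost of larger individual queries.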
@@ -3,20 +3,15 @@ module bridge-history-api
 go 1.19

 require (
-	github.com/bits-and-blooms/bitset v1.7.0
 	github.com/ethereum/go-ethereum v1.12.2
 	github.com/gin-contrib/cors v1.4.0
-	github.com/gin-contrib/pprof v1.4.0
 	github.com/gin-gonic/gin v1.9.1
 	github.com/mattn/go-colorable v0.1.13
 	github.com/mattn/go-isatty v0.0.19
 	github.com/modern-go/reflect2 v1.0.2
-	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/pressly/goose/v3 v3.7.0
-	github.com/prometheus/client_golang v1.14.0
 	github.com/stretchr/testify v1.8.3
 	github.com/urfave/cli/v2 v2.25.7
-	golang.org/x/sync v0.3.0
 	gorm.io/driver/postgres v1.5.0
 	gorm.io/gorm v1.25.2
 )
@@ -25,6 +20,7 @@ require (
 	github.com/DataDog/zstd v1.5.2 // indirect
 	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/bits-and-blooms/bitset v1.7.0 // indirect
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
 	github.com/bytedance/sonic v1.9.2 // indirect
@@ -98,6 +94,7 @@ require (
 	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.14.0 // indirect
 	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.39.0 // indirect
 	github.com/prometheus/procfs v0.9.0 // indirect
@@ -119,7 +116,8 @@ require (
 	golang.org/x/arch v0.4.0 // indirect
 	golang.org/x/crypto v0.12.0 // indirect
 	golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect
-	golang.org/x/net v0.14.0 // indirect
+	golang.org/x/net v0.12.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.11.0 // indirect
 	golang.org/x/text v0.12.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
@@ -119,8 +119,6 @@ github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnR
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
 github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
-github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
-github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
 github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
 github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
@@ -364,8 +362,6 @@ github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
 github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
 github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
-github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
 github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
@@ -535,8 +531,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
 golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
-golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -24,14 +24,14 @@ func NewBatchController(db *gorm.DB) *BatchController {
 func (b *BatchController) GetWithdrawRootByBatchIndex(ctx *gin.Context) {
 	var req types.QueryByBatchIndexRequest
 	if err := ctx.ShouldBind(&req); err != nil {
-		types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
+		types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
 		return
 	}
 	result, err := b.batchLogic.GetWithdrawRootByBatchIndex(ctx, req.BatchIndex)
 	if err != nil {
-		types.RenderFailure(ctx, types.ErrGetWithdrawRootByBatchIndexFailure, err)
+		types.RenderJSON(ctx, types.ErrGetWithdrawRootByBatchIndexFailure, err, nil)
 		return
 	}

-	types.RenderSuccess(ctx, result)
+	types.RenderJSON(ctx, types.Success, nil, result)
 }
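The revert collapses the `RenderSuccess`/`RenderFailure` pair into a single `RenderJSON` helper. Its definition is not part of this diff; the following is a plausible shape inferred only from call sites such as `types.RenderJSON(ctx, types.Success, nil, result)`, and the envelope field names are assumptions:

```go
package types

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// Response is a hypothetical envelope inferred from the handlers above.
type Response struct {
	ErrCode int         `json:"errcode"`
	ErrMsg  string      `json:"errmsg"`
	Data    interface{} `json:"data"`
}

// RenderJSON writes one uniform JSON envelope for both success and
// failure paths; the signature mirrors the call sites in the diff.
func RenderJSON(ctx *gin.Context, code int, err error, data interface{}) {
	msg := ""
	if err != nil {
		msg = err.Error()
	}
	ctx.JSON(http.StatusOK, Response{ErrCode: code, ErrMsg: msg, Data: data})
}
```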
@@ -10,8 +10,7 @@ var (
 	// HistoryCtrler is controller instance
 	HistoryCtrler *HistoryController
 	// BatchCtrler is controller instance
-	BatchCtrler *BatchController
-
+	BatchCtrler *BatchController
 	initControllerOnce sync.Once
 )
@@ -1,40 +1,23 @@
 package controller

 import (
-	"errors"
-	"reflect"
-	"time"
-
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/gin-gonic/gin"
-	"github.com/patrickmn/go-cache"
-	"golang.org/x/sync/singleflight"
 	"gorm.io/gorm"

 	"bridge-history-api/internal/logic"
 	"bridge-history-api/internal/types"
 )

-const (
-	cacheKeyPrefixClaimableTxsByAddr = "claimableTxsByAddr:"
-	cacheKeyPrefixQueryTxsByHash     = "queryTxsByHash:"
-)
-
 // HistoryController contains the query claimable txs service
 type HistoryController struct {
 	historyLogic *logic.HistoryLogic
-	cache        *cache.Cache
-	singleFlight singleflight.Group
-	cacheMetrics *cacheMetrics
 }

 // NewHistoryController return HistoryController instance
 func NewHistoryController(db *gorm.DB) *HistoryController {
 	return &HistoryController{
 		historyLogic: logic.NewHistoryLogic(db),
-		cache:        cache.New(30*time.Second, 10*time.Minute),
-		cacheMetrics: initCacheMetrics(),
 	}
 }
@@ -42,120 +25,48 @@ func NewHistoryController(db *gorm.DB) *HistoryController {
 func (c *HistoryController) GetAllClaimableTxsByAddr(ctx *gin.Context) {
 	var req types.QueryByAddressRequest
 	if err := ctx.ShouldBind(&req); err != nil {
-		types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
+		types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
 		return
 	}

-	cacheKey := cacheKeyPrefixClaimableTxsByAddr + req.Address
-	if cachedData, found := c.cache.Get(cacheKey); found {
-		c.cacheMetrics.cacheHits.WithLabelValues("GetAllClaimableTxsByAddr").Inc()
-		// Log cache hit along with request param.
-		log.Info("cache hit", "request", req)
-		if cachedData == nil {
-			types.RenderSuccess(ctx, &types.ResultData{})
-			return
-		} else if resultData, ok := cachedData.(*types.ResultData); ok {
-			types.RenderSuccess(ctx, resultData)
-			return
-		}
-		// Log error for unexpected type, then fetch data from the database.
-		log.Error("unexpected type in cache", "expected", "*types.ResultData", "got", reflect.TypeOf(cachedData))
-	} else {
-		c.cacheMetrics.cacheMisses.WithLabelValues("GetAllClaimableTxsByAddr").Inc()
-		// Log cache miss along with request param.
-		log.Info("cache miss", "request", req)
-	}
-
-	result, err, _ := c.singleFlight.Do(cacheKey, func() (interface{}, error) {
-		txs, total, err := c.historyLogic.GetClaimableTxsByAddress(ctx, common.HexToAddress(req.Address))
-		if err != nil {
-			return nil, err
-		}
-		resultData := &types.ResultData{Result: txs, Total: total}
-		c.cache.Set(cacheKey, resultData, cache.DefaultExpiration)
-		return resultData, nil
-	})
-
+	offset := (req.Page - 1) * req.PageSize
+	limit := req.PageSize
+	txs, total, err := c.historyLogic.GetClaimableTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
 	if err != nil {
-		types.RenderFailure(ctx, types.ErrGetClaimablesFailure, err)
+		types.RenderJSON(ctx, types.ErrGetClaimablesFailure, err, nil)
 		return
 	}

-	if resultData, ok := result.(*types.ResultData); ok {
-		types.RenderSuccess(ctx, resultData)
-	} else {
-		log.Error("unexpected type from singleflight", "expected", "*types.ResultData", "got", reflect.TypeOf(result))
-		types.RenderFailure(ctx, types.ErrGetClaimablesFailure, errors.New("unexpected error"))
-	}
+	types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: txs, Total: total})
 }

+// GetAllTxsByAddr defines the http get method behavior
+func (c *HistoryController) GetAllTxsByAddr(ctx *gin.Context) {
+	var req types.QueryByAddressRequest
+	if err := ctx.ShouldBind(&req); err != nil {
+		types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
+		return
+	}
+	offset := (req.Page - 1) * req.PageSize
+	limit := req.PageSize
+	message, total, err := c.historyLogic.GetTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
+	if err != nil {
+		types.RenderJSON(ctx, types.ErrGetTxsByAddrFailure, err, nil)
+		return
+	}
+	types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: message, Total: total})
+}

 // PostQueryTxsByHash defines the http post method behavior
 func (c *HistoryController) PostQueryTxsByHash(ctx *gin.Context) {
 	var req types.QueryByHashRequest
 	if err := ctx.ShouldBindJSON(&req); err != nil {
-		types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
+		types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
 		return
 	}

-	if len(req.Txs) > 10 {
-		types.RenderFailure(ctx, types.ErrParameterInvalidNo, errors.New("the number of hashes in the request exceeds the allowed maximum of 10"))
+	result, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
+	if err != nil {
+		types.RenderJSON(ctx, types.ErrGetTxsByHashFailure, err, nil)
 		return
 	}
-	hashesMap := make(map[string]struct{}, len(req.Txs))
-	results := make([]*types.TxHistoryInfo, 0, len(req.Txs))
-	uncachedHashes := make([]string, 0, len(req.Txs))
-	for _, hash := range req.Txs {
-		if _, exists := hashesMap[hash]; exists {
-			// Skip duplicate tx hash values.
-			continue
-		}
-		hashesMap[hash] = struct{}{}
-
-		cacheKey := cacheKeyPrefixQueryTxsByHash + hash
-		if cachedData, found := c.cache.Get(cacheKey); found {
-			c.cacheMetrics.cacheHits.WithLabelValues("PostQueryTxsByHash").Inc()
-			// Log cache hit along with tx hash.
-			log.Info("cache hit", "tx hash", hash)
-			if cachedData == nil {
-				continue
-			} else if txInfo, ok := cachedData.(*types.TxHistoryInfo); ok {
-				results = append(results, txInfo)
-			} else {
-				log.Error("unexpected type in cache", "expected", "*types.TxHistoryInfo", "got", reflect.TypeOf(cachedData))
-				uncachedHashes = append(uncachedHashes, hash)
-			}
-		} else {
-			c.cacheMetrics.cacheMisses.WithLabelValues("PostQueryTxsByHash").Inc()
-			// Log cache miss along with tx hash.
-			log.Info("cache miss", "tx hash", hash)
-			uncachedHashes = append(uncachedHashes, hash)
-		}
-	}
-
-	if len(uncachedHashes) > 0 {
-		dbResults, err := c.historyLogic.GetTxsByHashes(ctx, uncachedHashes)
-		if err != nil {
-			types.RenderFailure(ctx, types.ErrGetTxsByHashFailure, err)
-			return
-		}
-
-		resultMap := make(map[string]*types.TxHistoryInfo)
-		for _, result := range dbResults {
-			results = append(results, result)
-			resultMap[result.Hash] = result
-		}
-
-		for _, hash := range uncachedHashes {
-			cacheKey := cacheKeyPrefixQueryTxsByHash + hash
-			result, found := resultMap[hash]
-			if found {
-				c.cache.Set(cacheKey, result, cache.DefaultExpiration)
-			} else {
-				c.cache.Set(cacheKey, nil, cache.DefaultExpiration)
-			}
-		}
-	}
-
-	resultData := &types.ResultData{Result: results, Total: uint64(len(results))}
-	types.RenderSuccess(ctx, resultData)
+	types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: result, Total: 0})
 }
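The code removed above pairs an in-process TTL cache (`go-cache`) with `singleflight`, so concurrent requests for one key trigger a single database query whose result is then cached. A minimal sketch of that pattern, with a hypothetical `load` function in place of the real history logic:

```go
package cachedemo

import (
	"time"

	"github.com/patrickmn/go-cache"
	"golang.org/x/sync/singleflight"
)

var (
	// Same TTL and cleanup interval as the removed controller code.
	c  = cache.New(30*time.Second, 10*time.Minute)
	sf singleflight.Group
)

// get returns the cached value for key; on a miss, only one concurrent
// caller executes load, and every waiter shares its result.
func get(key string, load func() (interface{}, error)) (interface{}, error) {
	if v, found := c.Get(key); found {
		return v, nil
	}
	v, err, _ := sf.Do(key, func() (interface{}, error) {
		val, err := load()
		if err != nil {
			return nil, err
		}
		c.Set(key, val, cache.DefaultExpiration)
		return val, nil
	})
	return v, err
}
```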
@@ -1,40 +0,0 @@
package controller

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type cacheMetrics struct {
	cacheHits   *prometheus.CounterVec
	cacheMisses *prometheus.CounterVec
}

var (
	initMetricsOnce sync.Once
	cm              *cacheMetrics
)

func initCacheMetrics() *cacheMetrics {
	initMetricsOnce.Do(func() {
		cm = &cacheMetrics{
			cacheHits: promauto.NewCounterVec(
				prometheus.CounterOpts{
					Name: "bridge_history_api_cache_hits_total",
					Help: "The total number of cache hits",
				},
				[]string{"api"},
			),
			cacheMisses: promauto.NewCounterVec(
				prometheus.CounterOpts{
					Name: "bridge_history_api_cache_misses_total",
					Help: "The total number of cache misses",
				},
				[]string{"api"},
			),
		}
	})
	return cm
}
@@ -23,101 +23,66 @@ func NewHistoryLogic(db *gorm.DB) *HistoryLogic {
|
||||
return logic
|
||||
}
|
||||
|
||||
// updateL2TxClaimInfo updates UserClaimInfos for each transaction history.
|
||||
func updateL2TxClaimInfo(ctx context.Context, txHistories []*types.TxHistoryInfo, db *gorm.DB) {
|
||||
// getCrossTxClaimInfo get UserClaimInfos by address
|
||||
func getCrossTxClaimInfo(ctx context.Context, msgHash string, db *gorm.DB) *types.UserClaimInfo {
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(db)
|
||||
rollupOrm := orm.NewRollupBatch(db)
|
||||
|
||||
var l2MsgHashes []string
|
||||
for _, txHistory := range txHistories {
|
||||
if !txHistory.IsL1 {
|
||||
l2MsgHashes = append(l2MsgHashes, txHistory.MsgHash)
|
||||
}
|
||||
l2sentMsg, err := l2SentMsgOrm.GetL2SentMsgByHash(ctx, msgHash)
|
||||
if err != nil || l2sentMsg == nil {
|
||||
log.Debug("getCrossTxClaimInfo failed", "error", err)
|
||||
return &types.UserClaimInfo{}
|
||||
}
|
||||
|
||||
l2sentMsgs, err := l2SentMsgOrm.GetL2SentMsgsByHashes(ctx, l2MsgHashes)
|
||||
if err != nil || len(l2sentMsgs) == 0 {
|
||||
log.Debug("GetL2SentMsgsByHashes failed", "l2 sent msgs", l2sentMsgs, "error", err)
|
return
}

l2MsgMap := make(map[string]*orm.L2SentMsg, len(l2sentMsgs))
var batchIndexes []uint64
for _, l2sentMsg := range l2sentMsgs {
l2MsgMap[l2sentMsg.MsgHash] = l2sentMsg
batchIndexes = append(batchIndexes, l2sentMsg.BatchIndex)
}

batches, err := rollupOrm.GetRollupBatchesByIndexes(ctx, batchIndexes)
batch, err := rollupOrm.GetRollupBatchByIndex(ctx, l2sentMsg.BatchIndex)
if err != nil {
log.Debug("GetRollupBatchesByIndexes failed", "error", err)
return
log.Debug("getCrossTxClaimInfo failed", "error", err)
return &types.UserClaimInfo{}
}
return &types.UserClaimInfo{
From: l2sentMsg.Sender,
To: l2sentMsg.Target,
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
}

batchMap := make(map[uint64]*orm.RollupBatch, len(batches))
for _, batch := range batches {
batchMap[batch.BatchIndex] = batch
}

for _, txHistory := range txHistories {
if txHistory.IsL1 {
continue
}

l2sentMsg, foundL2SentMsg := l2MsgMap[txHistory.MsgHash]
batch, foundBatch := batchMap[l2sentMsg.BatchIndex]
if foundL2SentMsg && foundBatch {
txHistory.ClaimInfo = &types.UserClaimInfo{
From: l2sentMsg.Sender,
To: l2sentMsg.Target,
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
}
}
}
}

func updateCrossTxHashes(ctx context.Context, txHistories []*types.TxHistoryInfo, db *gorm.DB) {
msgHashes := make([]string, len(txHistories))
for i, txHistory := range txHistories {
msgHashes[i] = txHistory.MsgHash
}

func updateCrossTxHash(ctx context.Context, msgHash string, txInfo *types.TxHistoryInfo, db *gorm.DB) {
relayed := orm.NewRelayedMsg(db)
relayedMsgs, err := relayed.GetRelayedMsgsByHashes(ctx, msgHashes)
if err != nil || len(relayedMsgs) == 0 {
log.Debug("GetRelayedMsgsByHashes failed", "msg hashes", msgHashes, "relayed msgs", relayedMsgs, "error", err)
relayed, err := relayed.GetRelayedMsgByHash(ctx, msgHash)
if err != nil {
log.Debug("updateCrossTxHash failed", "error", err)
return
}
if relayed == nil {
return
}
if relayed.Layer1Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer1Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
if relayed.Layer2Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer2Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}

relayedMsgMap := make(map[string]*orm.RelayedMsg, len(relayedMsgs))
for _, relayedMsg := range relayedMsgs {
relayedMsgMap[relayedMsg.MsgHash] = relayedMsg
}

for _, txHistory := range txHistories {
if relayedMsg, found := relayedMsgMap[txHistory.MsgHash]; found {
txHistory.FinalizeTx.Hash = relayedMsg.Layer1Hash + relayedMsg.Layer2Hash
txHistory.FinalizeTx.BlockNumber = relayedMsg.Height
}
}
}

func updateCrossTxHashesAndL2TxClaimInfo(ctx context.Context, txHistories []*types.TxHistoryInfo, db *gorm.DB) {
updateCrossTxHashes(ctx, txHistories, db)
updateL2TxClaimInfo(ctx, txHistories, db)
}
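The hunks above replace per-message lookups (getCrossTxClaimInfo, updateCrossTxHash) with one batched query per table followed by an in-memory join. A minimal sketch of that pattern, with illustrative names (Row, fetchByKeys) that are not from the codebase:

```go
package sketch

import "context"

// Row stands in for the ORM rows above (orm.RelayedMsg, orm.L2SentMsg).
type Row struct {
	Key   string
	Value string
}

// fetchByKeys stands in for a single `WHERE key IN (?)` query:
// one round trip for the whole key set.
func fetchByKeys(ctx context.Context, keys []string) ([]*Row, error) {
	return nil, nil // placeholder for the batched query
}

// indexByKey fetches all rows for the keys at once and indexes them,
// so callers can join in memory instead of querying once per key.
func indexByKey(ctx context.Context, keys []string) (map[string]*Row, error) {
	rows, err := fetchByKeys(ctx, keys)
	if err != nil {
		return nil, err
	}
	byKey := make(map[string]*Row, len(rows))
	for _, r := range rows {
		byKey[r.Key] = r
	}
	return byKey, nil
}
```

This trades N database round trips for one query plus O(N) map lookups, which is the motivation for the GetRelayedMsgsByHashes / GetRollupBatchesByIndexes helpers introduced in this diff.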

// GetClaimableTxsByAddress get all claimable txs under given address
func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address common.Address) ([]*types.TxHistoryInfo, uint64, error) {
func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address common.Address, offset int, limit int) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
l2SentMsgOrm := orm.NewL2SentMsg(h.db)
l2CrossMsgOrm := orm.NewCrossMsg(h.db)
results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddress(ctx, address.Hex())
total, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressTotalNum(ctx, address.Hex())
if err != nil || total == 0 {
return txHistories, 0, err
}
results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(ctx, address.Hex(), offset, limit)
if err != nil || len(results) == 0 {
return txHistories, 0, err
}
@@ -137,10 +102,10 @@ func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address com
for _, result := range results {
txInfo := &types.TxHistoryInfo{
Hash: result.TxHash,
MsgHash: result.MsgHash,
IsL1: false,
BlockNumber: result.Height,
FinalizeTx: &types.Finalized{},
ClaimInfo: getCrossTxClaimInfo(ctx, result.MsgHash, h.db),
}
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
txInfo.Amount = crossMsg.Amount
@@ -152,36 +117,96 @@ func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address com
}
txHistories = append(txHistories, txInfo)
}
updateL2TxClaimInfo(ctx, txHistories, h.db)
return txHistories, uint64(len(results)), err
return txHistories, total, err
}

// GetTxsByAddress get all txs under given address
func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address common.Address, offset int, limit int) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
utilOrm := orm.NewCrossMsg(h.db)
total, err := utilOrm.GetTotalCrossMsgCountByAddress(ctx, address.String())
if err != nil || total == 0 {
return txHistories, 0, err
}
result, err := utilOrm.GetCrossMsgsByAddressWithOffset(ctx, address.String(), offset, limit)

if err != nil {
return nil, 0, err
}
for _, msg := range result {
txHistory := &types.TxHistoryInfo{
Hash: msg.Layer1Hash + msg.Layer2Hash,
Amount: msg.Amount,
To: msg.Target,
L1Token: msg.Layer1Token,
L2Token: msg.Layer2Token,
IsL1: msg.MsgType == int(orm.Layer1Msg),
BlockNumber: msg.Height,
BlockTimestamp: msg.Timestamp,
CreatedAt: msg.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
ClaimInfo: getCrossTxClaimInfo(ctx, msg.MsgHash, h.db),
}
updateCrossTxHash(ctx, msg.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
}
return txHistories, total, nil
}

// GetTxsByHashes get tx infos under given tx hashes
func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, hashes []string) ([]*types.TxHistoryInfo, error) {
txHistories := make([]*types.TxHistoryInfo, 0)
CrossMsgOrm := orm.NewCrossMsg(h.db)
results, err := CrossMsgOrm.GetCrossMsgsByHashes(ctx, hashes)
if err != nil {
return nil, err
}

var txHistories []*types.TxHistoryInfo
for _, result := range results {
txHistory := &types.TxHistoryInfo{
Hash: result.Layer1Hash + result.Layer2Hash,
MsgHash: result.MsgHash,
Amount: result.Amount,
To: result.Target,
L1Token: result.Layer1Token,
L2Token: result.Layer2Token,
IsL1: orm.MsgType(result.MsgType) == orm.Layer1Msg,
BlockNumber: result.Height,
BlockTimestamp: result.Timestamp,
CreatedAt: result.CreatedAt,
FinalizeTx: &types.Finalized{Hash: ""},
for _, hash := range hashes {
l1result, err := CrossMsgOrm.GetL1CrossMsgByHash(ctx, common.HexToHash(hash))
if err != nil {
return nil, err
}
if l1result != nil {
txHistory := &types.TxHistoryInfo{
Hash: l1result.Layer1Hash,
Amount: l1result.Amount,
To: l1result.Target,
IsL1: true,
L1Token: l1result.Layer1Token,
L2Token: l1result.Layer2Token,
BlockNumber: l1result.Height,
BlockTimestamp: l1result.Timestamp,
CreatedAt: l1result.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
}
updateCrossTxHash(ctx, l1result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
l2result, err := CrossMsgOrm.GetL2CrossMsgByHash(ctx, common.HexToHash(hash))
if err != nil {
return nil, err
}
if l2result != nil {
txHistory := &types.TxHistoryInfo{
Hash: l2result.Layer2Hash,
Amount: l2result.Amount,
To: l2result.Target,
IsL1: false,
L1Token: l2result.Layer1Token,
L2Token: l2result.Layer2Token,
BlockNumber: l2result.Height,
BlockTimestamp: l2result.Timestamp,
CreatedAt: l2result.CreatedAt,
FinalizeTx: &types.Finalized{
Hash: "",
},
ClaimInfo: getCrossTxClaimInfo(ctx, l2result.MsgHash, h.db),
}
updateCrossTxHash(ctx, l2result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
txHistories = append(txHistories, txHistory)
}

updateCrossTxHashesAndL2TxClaimInfo(ctx, txHistories, h.db)
return txHistories, nil
}

@@ -5,15 +5,13 @@ import (

"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"

"bridge-history-api/config"
"bridge-history-api/internal/controller"
"bridge-history-api/observability"
)

// Route routes the APIs
func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
func Route(router *gin.Engine, conf *config.Config) {
router.Use(cors.New(cors.Config{
AllowOrigins: []string{"*"},
AllowMethods: []string{"GET", "POST", "PUT", "DELETE"},
@@ -22,9 +20,9 @@ func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
MaxAge: 12 * time.Hour,
}))

observability.Use(router, "bridge_history_api", reg)

r := router.Group("api/")
r.GET("/txs", controller.HistoryCtrler.GetAllTxsByAddr)
r.POST("/txsbyhashes", controller.HistoryCtrler.PostQueryTxsByHash)
r.GET("/claimable", controller.HistoryCtrler.GetAllClaimableTxsByAddr)
r.GET("/withdraw_root", controller.BatchCtrler.GetWithdrawRootByBatchIndex)
}

@@ -10,8 +10,6 @@ import (
const (
// Success shows OK.
Success = 0
// InternalServerError shows a fatal error in the server
InternalServerError = 500
// ErrParameterInvalidNo is invalid params
ErrParameterInvalidNo = 40001
// ErrGetClaimablesFailure is getting all claimables txs error
@@ -26,7 +24,9 @@ const (

// QueryByAddressRequest the request parameter of address api
type QueryByAddressRequest struct {
Address string `form:"address" binding:"required"`
Address string `form:"address" binding:"required"`
Page int `form:"page" binding:"required"`
PageSize int `form:"page_size" binding:"required"`
}
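QueryByAddressRequest now carries 1-based page and page_size, while the ORM helpers above take offset/limit. The controller wiring is not shown in this diff; a hedged sketch of the conversion a handler would presumably do (the function name is hypothetical):

```go
package sketch

// toOffsetLimit maps 1-based page/page_size to the offset/limit arguments
// used by GetCrossMsgsByAddressWithOffset and friends. Hypothetical helper,
// not part of this diff.
func toOffsetLimit(page, pageSize int) (offset, limit int) {
	if page < 1 {
		page = 1 // clamp so page 1 starts at offset 0
	}
	return (page - 1) * pageSize, pageSize
}
```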

// QueryByHashRequest the request parameter of hash api
@@ -78,7 +78,6 @@ type UserClaimInfo struct {
// TxHistoryInfo the schema of tx history infos
type TxHistoryInfo struct {
Hash string `json:"hash"`
MsgHash string `json:"msgHash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
@@ -104,28 +103,3 @@ func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
}
ctx.JSON(http.StatusOK, renderData)
}

// RenderSuccess renders success response with json
func RenderSuccess(ctx *gin.Context, data interface{}) {
RenderJSON(ctx, Success, nil, data)
}

// RenderFailure renders failure response with json
func RenderFailure(ctx *gin.Context, errCode int, err error) {
RenderJSON(ctx, errCode, err, nil)
}

// RenderFatal renders fatal response with json
func RenderFatal(ctx *gin.Context, err error) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: InternalServerError,
ErrMsg: errMsg,
Data: nil,
}
ctx.Set("errcode", InternalServerError)
ctx.JSON(http.StatusInternalServerError, renderData)
}

@@ -1,35 +0,0 @@
package observability

import (
"github.com/gin-gonic/gin"
"gorm.io/gorm"

"bridge-history-api/internal/types"
"bridge-history-api/utils"
)

// ProbesController probe check controller
type ProbesController struct {
db *gorm.DB
}

// NewProbesController returns an ProbesController instance
func NewProbesController(db *gorm.DB) *ProbesController {
return &ProbesController{
db: db,
}
}

// HealthCheck the api controller for health check
func (a *ProbesController) HealthCheck(c *gin.Context) {
if _, err := utils.Ping(a.db); err != nil {
types.RenderFatal(c, err)
return
}
types.RenderSuccess(c, nil)
}

// Ready the api controller for ready check
func (a *ProbesController) Ready(c *gin.Context) {
types.RenderSuccess(c, nil)
}
@@ -1,53 +0,0 @@
package observability

import (
"errors"
"fmt"
"net/http"
"time"

// enable the pprof
_ "net/http/pprof"

"github.com/ethereum/go-ethereum/log"
"github.com/gin-contrib/pprof"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/urfave/cli/v2"
"gorm.io/gorm"

"bridge-history-api/utils"
)

// Server starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Server(c *cli.Context, db *gorm.DB) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}

r := gin.New()
r.Use(gin.Recovery())
pprof.Register(r)
r.GET("/metrics", func(context *gin.Context) {
promhttp.Handler().ServeHTTP(context.Writer, context.Request)
})

probeController := NewProbesController(db)
r.GET("/health", probeController.HealthCheck)
r.GET("/ready", probeController.Ready)

address := fmt.Sprintf(":%s", c.String(utils.MetricsPort.Name))
server := &http.Server{
Addr: address,
Handler: r,
ReadHeaderTimeout: time.Minute,
}
log.Info("Starting metrics server", "address", address)

go func() {
if runServerErr := server.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
log.Crit("run metrics http server failure", "error", runServerErr)
}
}()
}
@@ -71,16 +71,6 @@ func (r *RollupBatch) GetRollupBatchByIndex(ctx context.Context, index uint64) (
return &result, nil
}

// GetRollupBatchesByIndexes return the rollup batches by indexes
func (r *RollupBatch) GetRollupBatchesByIndexes(ctx context.Context, indexes []uint64) ([]*RollupBatch, error) {
var results []*RollupBatch
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index IN (?)", indexes).Find(&results).Error
if err != nil {
return nil, fmt.Errorf("RollupBatch.GetRollupBatchesByIndexes error: %w", err)
}
return results, nil
}

// InsertRollupBatch batch insert rollup batch into db and return the transaction
func (r *RollupBatch) InsertRollupBatch(ctx context.Context, batches []*RollupBatch, dbTx ...*gorm.DB) error {
if len(batches) == 0 {

@@ -368,14 +368,3 @@ func (c *CrossMsg) GetCrossMsgsByAddressWithOffset(ctx context.Context, sender s
}
return messages, nil
}

// GetCrossMsgsByHashes retrieves a list of cross messages identified by their Layer 1 or Layer 2 hashes.
func (c *CrossMsg) GetCrossMsgsByHashes(ctx context.Context, hashes []string) ([]*CrossMsg, error) {
var results []*CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer1_hash IN (?) OR layer2_hash IN (?)", hashes, hashes).Find(&results).Error
if err != nil {
return nil, fmt.Errorf("CrossMsg.GetCrossMsgsByHashes error: %w", err)
}

return results, nil
}

@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"strings"
"time"

"github.com/ethereum/go-ethereum/log"
@@ -55,19 +54,6 @@ func (l *L2SentMsg) GetL2SentMsgByHash(ctx context.Context, msgHash string) (*L2
return &result, nil
}

// GetL2SentMsgsByHashes get l2 sent msgs by hashes
func (l *L2SentMsg) GetL2SentMsgsByHashes(ctx context.Context, msgHashes []string) ([]*L2SentMsg, error) {
var results []*L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("msg_hash IN (?)", msgHashes).
Find(&results).
Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgsByHashes error: %w", err)
}
return results, nil
}

// GetLatestSentMsgHeightOnL2 get latest sent msg height on l2
func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, error) {
var result L2SentMsg
@@ -86,61 +72,26 @@ func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, err
return result.Height, nil
}

// GetClaimableL2SentMsgByAddress returns both the total number of unclaimed messages and a paginated list of those messages.
// TODO: Add metrics about the result set sizes (total/claimed/unclaimed messages).
func (l *L2SentMsg) GetClaimableL2SentMsgByAddress(ctx context.Context, address string) ([]*L2SentMsg, error) {
var totalMsgs []*L2SentMsg
db := l.db.WithContext(ctx)
db = db.Table("l2_sent_msg")
db = db.Where("original_sender = ? OR sender = ?", address, address)
db = db.Where("msg_proof != ''")
db = db.Where("deleted_at IS NULL")
db = db.Order("id DESC")
tx := db.Find(&totalMsgs)
if tx.Error != nil || tx.RowsAffected == 0 {
return nil, tx.Error
// GetClaimableL2SentMsgByAddressWithOffset get claimable l2 sent msg by address with offset
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressWithOffset(ctx context.Context, address string, offset int, limit int) ([]*L2SentMsg, error) {
var results []*L2SentMsg
err := l.db.WithContext(ctx).Raw(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='' ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset).
Scan(&results).Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressWithOffset error: %w", err)
}
return results, nil
}

// Note on the use of IN vs VALUES in SQL Queries:
// ------------------------------------------------
// When using the IN predicate with a large list (>100) of values, performance may suffer.
// An alternative approach is to use constant subqueries with the VALUES construct.
// For more details and optimization tips, visit:
// https://postgres.cz/wiki/PostgreSQL_SQL_Tricks_I#Predicate_IN_optimalization
//
// Example using IN:
// SELECT * FROM tab WHERE x IN (1,2,3,...,n); -- where n > 70
//
// Optimized example using VALUES:
// SELECT * FROM tab WHERE x IN (VALUES(10), (20));
//
var valuesStr string
for _, msg := range totalMsgs {
valuesStr += fmt.Sprintf("('%s'),", msg.MsgHash)
// GetClaimableL2SentMsgByAddressTotalNum get claimable l2 sent msg by address total num
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressTotalNum(ctx context.Context, address string) (uint64, error) {
var count uint64
err := l.db.WithContext(ctx).Raw(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='';`, address).
Scan(&count).Error
if err != nil {
return 0, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressTotalNum error: %w", err)
}
valuesStr = strings.TrimSuffix(valuesStr, ",")

var claimedMsgHashes []string
db = l.db.WithContext(ctx)
db = db.Table("relayed_msg")
db = db.Where(fmt.Sprintf("msg_hash IN (VALUES %s)", valuesStr))
db = db.Where("deleted_at IS NULL")
if err := db.Pluck("msg_hash", &claimedMsgHashes).Error; err != nil {
return nil, err
}

claimedMsgHashSet := make(map[string]struct{})
for _, hash := range claimedMsgHashes {
claimedMsgHashSet[hash] = struct{}{}
}
var unclaimedL2Msgs []*L2SentMsg
for _, msg := range totalMsgs {
if _, found := claimedMsgHashSet[msg.MsgHash]; !found {
unclaimedL2Msgs = append(unclaimedL2Msgs, msg)
}
}

return unclaimedL2Msgs, nil
return count, nil
}
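The new ORM pair splits the old single scan into a COUNT plus a paginated SELECT, and GetClaimableTxsByAddress above drives them as total-then-page. A condensed sketch of that calling pattern, using the method names from this diff (the wrapper function itself is illustrative, not part of the change):

```go
package orm

import "context"

// claimablePage mirrors the handler's flow: count first, return early when
// there is nothing to claim, then fetch exactly one page. Illustrative only.
func claimablePage(ctx context.Context, l2 *L2SentMsg, addr string, offset, limit int) ([]*L2SentMsg, uint64, error) {
	total, err := l2.GetClaimableL2SentMsgByAddressTotalNum(ctx, addr)
	if err != nil || total == 0 {
		return nil, 0, err // empty page; total of 0 signals "nothing claimable"
	}
	msgs, err := l2.GetClaimableL2SentMsgByAddressWithOffset(ctx, addr, offset, limit)
	if err != nil {
		return nil, 0, err
	}
	return msgs, total, nil
}
```

Returning the COUNT alongside one LIMIT/OFFSET page keeps the response size bounded regardless of how many messages an address has, which the old load-everything-then-filter version could not guarantee.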

// GetLatestL2SentMsgBatchIndex get latest l2 sent msg batch index

@@ -1,77 +0,0 @@
package orm

import (
"context"
"testing"

"github.com/stretchr/testify/assert"

"bridge-history-api/orm/migrate"

"scroll-tech/common/database"
"scroll-tech/common/docker"
)

func TestGetClaimableL2SentMsgByAddress(t *testing.T) {
base := docker.NewDockerApp()
base.RunDBImage(t)

db, err := database.InitDB(
&database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
assert.NoError(t, err)

sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

l2SentMsgOrm := NewL2SentMsg(db)
relayedMsgOrm := NewRelayedMsg(db)

msgs, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddress(context.Background(), "sender1")
assert.NoError(t, err)
assert.Len(t, msgs, 0)

l2SentMsgs := []*L2SentMsg{
{
Sender: "sender1",
MsgHash: "hash1",
MsgProof: "proof1",
Nonce: 0,
},
{
OriginalSender: "sender1",
MsgHash: "hash2",
MsgProof: "proof2",
Nonce: 1,
},
{
OriginalSender: "sender1",
MsgHash: "hash3",
MsgProof: "",
Nonce: 2,
},
}
relayedMsgs := []*RelayedMsg{
{
MsgHash: "hash2",
},
{
MsgHash: "hash3",
},
}
err = l2SentMsgOrm.InsertL2SentMsg(context.Background(), l2SentMsgs)
assert.NoError(t, err)
err = relayedMsgOrm.InsertRelayedMsg(context.Background(), relayedMsgs)
assert.NoError(t, err)

msgs, err = l2SentMsgOrm.GetClaimableL2SentMsgByAddress(context.Background(), "sender1")
assert.NoError(t, err)
assert.Len(t, msgs, 1)
assert.Equal(t, "hash1", msgs[0].MsgHash)
}
@@ -49,19 +49,6 @@ func (r *RelayedMsg) GetRelayedMsgByHash(ctx context.Context, msgHash string) (*
return &result, nil
}

// GetRelayedMsgsByHashes get relayed msg by hash array
func (r *RelayedMsg) GetRelayedMsgsByHashes(ctx context.Context, msgHashes []string) ([]*RelayedMsg, error) {
var results []*RelayedMsg
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
Where("msg_hash IN (?)", msgHashes).
Find(&results).
Error
if err != nil {
return nil, fmt.Errorf("RelayedMsg.GetRelayedMsgsByHashes error: %w", err)
}
return results, nil
}

// GetLatestRelayedHeightOnL1 get latest relayed height on l1
func (r *RelayedMsg) GetLatestRelayedHeightOnL1(ctx context.Context) (uint64, error) {
var result RelayedMsg

@@ -2,7 +2,6 @@ package utils

import (
"context"
"database/sql"
"fmt"
"time"

@@ -68,29 +67,18 @@ func InitDB(config *config.DBConfig) (*gorm.DB, error) {
if err != nil {
return nil, err
}

sqlDB, pingErr := Ping(db)
if pingErr != nil {
return nil, pingErr
}

sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)

return db, nil
}

// Ping check db status
func Ping(db *gorm.DB) (*sql.DB, error) {
sqlDB, err := db.DB()
if err != nil {
return nil, err
}

sqlDB.SetMaxOpenConns(config.MaxOpenNum)
sqlDB.SetMaxIdleConns(config.MaxIdleNum)

if err = sqlDB.Ping(); err != nil {
return nil, err
}
return sqlDB, nil
return db, nil
}
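For orientation: after this hunk, InitDB calls Ping and then applies the pool limits itself, while Ping stays a lightweight connectivity check that the /health probe above also calls directly. A hedged sketch of how the two compose, assuming the signatures shown in this hunk (the wrapper is illustrative):

```go
package sketch

import (
	"gorm.io/gorm"

	"bridge-history-api/config"
	"bridge-history-api/utils"
)

// openAndCheck wires the two helpers together: InitDB opens the pool and
// applies MaxOpenNum/MaxIdleNum, Ping re-runs the same liveness check the
// /health endpoint uses. Illustrative wiring only, not part of the diff.
func openAndCheck(cfg *config.DBConfig) (*gorm.DB, error) {
	db, err := utils.InitDB(cfg)
	if err != nil {
		return nil, err
	}
	if _, err := utils.Ping(db); err != nil {
		return nil, err
	}
	return db, nil
}
```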

// CloseDB closes the db handler. Note the db handler can only be closed when the program exits.

@@ -14,6 +14,14 @@ import (
"bridge-history-api/orm"
)

// CachedParsedTxCalldata store parsed batch infos
type CachedParsedTxCalldata struct {
CallDataIndex uint64
BatchIndices []uint64
StartBlocks []uint64
EndBlocks []uint64
}

// ParseBackendL1EventLogs parses L1 watched events
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
// Need use contract abi to parse event Log

0
rollup/.gitignore → bridge/.gitignore
vendored
@@ -1,15 +1,17 @@
.PHONY: mock_abi rollup_bins event_watcher gas_oracle rollup_relayer test lint clean docker
.PHONY: lint docker clean bridge

IMAGE_NAME=bridge
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..

mock_abi:
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./rollup/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./rollup/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./rollup/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./rollup/mock_bridge/MockBridgeL2.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL2.go

rollup_bins: ## Builds the Rollup bins.
bridge_bins: ## Builds the Bridge bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/

event_watcher: ## Builds the event_watcher bin
@@ -18,6 +20,9 @@ event_watcher: ## Builds the event_watcher bin
gas_oracle: ## Builds the gas_oracle bin
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/

message_relayer: ## Builds the message_relayer bin
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/

rollup_relayer: ## Builds the rollup_relayer bin
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/

@@ -34,8 +39,10 @@ docker_push:
docker push scrolltech/gas-oracle:${IMAGE_VERSION}
docker push scrolltech/event-watcher:${IMAGE_VERSION}
docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
docker push scrolltech/msg-relayer:${IMAGE_VERSION}

docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/event-watcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/event_watcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/rollup-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/rollup_relayer.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/msg-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/msg_relayer.Dockerfile
36
bridge/README.md
Normal file
@@ -0,0 +1,36 @@
# Bridge

This repo contains the Scroll bridge.

In addition, launching the bridge launches a separate instance of l2geth and sets up a communication channel between the two over JSON-RPC sockets.

Note that the private keys inside a sender instance must not be duplicated.

## Dependency

+ install `abigen`

``` bash
go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
```

## Build

```bash
make clean
make bridge
```

## Start
* use default ports and config.json

```bash
./build/bin/bridge --http
```

* use specified ports and config.json

```bash
./build/bin/bridge --config ./config.json --http --http.addr localhost --http.port 8290
```
File diff suppressed because one or more lines are too long
@@ -11,13 +11,21 @@ import (
func TestEventSignature(t *testing.T) {
assert := assert.New(t)

assert.Equal(L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))

assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2c32d4ae151744d0bf0b9464a3e897a1d17ed2f1af71f7c9a75f12ce0d28238f"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("26ba82f907317eedc97d0cbef23de76a43dd6edb563bdb6e9407645b950a7a2d"))

assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e"))

assert.Equal(L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))

assert.Equal(L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))

assert.Equal(L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
}

@@ -109,3 +117,12 @@ func TestPackSetL2BaseFee(t *testing.T) {
_, err = l2GasOracleABI.Pack("setL2BaseFee", baseFee)
assert.NoError(err)
}

func TestPackImportBlock(t *testing.T) {
assert := assert.New(t)

l1BlockContainerABI := L1BlockContainerABI

_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, []byte{}, false)
assert.NoError(err)
}
@@ -13,12 +13,12 @@ import (
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/watcher"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher"
)

var app *cli.App
@@ -61,7 +61,7 @@ func action(ctx *cli.Context) error {
}()

registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
metrics.Server(ctx, registry.(*prometheus.Registry))
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
@@ -75,7 +75,7 @@ func action(ctx *cli.Context) error {
}

l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)

l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
@@ -89,7 +89,6 @@ func action(ctx *cli.Context) error {
// Start l2 watcher process
go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions

log.Info("Start event-watcher successfully")

// Catch CTRL-C to ensure a graceful shutdown.
7
bridge/cmd/event_watcher/main.go
Normal file
@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/event_watcher/app"

func main() {
app.Run()
}
@@ -10,18 +10,17 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
butils "scroll-tech/rollup/internal/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
)

var app *cli.App
@@ -64,7 +63,7 @@ func action(ctx *cli.Context) error {
}()

registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
metrics.Server(ctx, registry.(*prometheus.Registry))

l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
@@ -79,7 +78,8 @@ func action(ctx *cli.Context) error {
return err
}

l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
if err != nil {
@@ -93,16 +93,14 @@ func action(ctx *cli.Context) error {
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
// Fetch the latest block number to decrease the delay when fetching gas prices
// Use latest block number - 1 to prevent frequent reorg
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, rpc.LatestBlockNumber)
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}

if loopErr = l1watcher.FetchBlockHeader(number - 1); loopErr != nil {
log.Error("Failed to fetch L1 block header", "latest", number-1, "err", loopErr)
if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
log.Error("Failed to fetch L1 block header", "latest", number, "err", loopErr)
}
})

7
bridge/cmd/gas_oracle/main.go
Normal file
@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/gas_oracle/app"

func main() {
app.Run()
}
@@ -11,7 +11,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/utils"

"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/internal/config"
)

// MockApp mockApp-test client manager.
@@ -22,29 +22,29 @@ type MockApp struct {
mockApps map[utils.MockAppName]docker.AppAPI

originFile string
rollupFile string
bridgeFile string

args []string
}

// NewRollupApp returns a new rollupApp manager; name must be one of them.
func NewRollupApp(base *docker.App, file string) *MockApp {
// NewBridgeApp returns a new bridgeApp manager; name must be one of them.
func NewBridgeApp(base *docker.App, file string) *MockApp {

rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", base.Timestamp)
rollupApp := &MockApp{
bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
bridgeApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
originFile: file,
rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile},
bridgeFile: bridgeFile,
args: []string{"--log.debug", "--config", bridgeFile},
}
if err := rollupApp.MockConfig(true); err != nil {
if err := bridgeApp.MockConfig(true); err != nil {
panic(err)
}
return rollupApp
return bridgeApp
}

// RunApp run rollup-test child process by multi parameters.
// RunApp run bridge-test child process by multi parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
if !(name == utils.EventWatcherApp ||
name == utils.GasOracleApp ||
@@ -72,16 +72,16 @@ func (b *MockApp) WaitExit() {
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
}

// Free stop and release rollup mocked apps.
// Free stop and release bridge mocked apps.
func (b *MockApp) Free() {
b.WaitExit()
_ = os.Remove(b.rollupFile)
_ = os.Remove(b.bridgeFile)
}

// MockConfig creates a new rollup config.
// MockConfig creates a new bridge config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin rollup config file.
// Load origin bridge config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
@@ -97,10 +97,10 @@ func (b *MockApp) MockConfig(store bool) error {
if !store {
return nil
}
// Store changed rollup config into a temp file.
// Store changed bridge config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.rollupFile, data, 0600)
return os.WriteFile(b.bridgeFile, data, 0600)
}
94
bridge/cmd/msg_relayer/app/app.go
Normal file
@@ -0,0 +1,94 @@
package app

import (
"context"
"fmt"
"os"
"os/signal"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
)

var app *cli.App

func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main services: 1) relay l1 messages to l2. 2) relay l2 messages to l1."
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.MessageRelayerApp)
}

func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}

subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()

registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}

// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)

// Finish start all message relayer functions
log.Info("Start message-relayer successfully")

// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)

// Wait until the interrupt signal is received from an OS signal.
<-interrupt

return nil
}

// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
7
bridge/cmd/msg_relayer/main.go
Normal file
@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/msg_relayer/app"

func main() {
app.Run()
}
@@ -13,14 +13,14 @@ import (
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
butils "scroll-tech/rollup/internal/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
)

var app *cli.App
@@ -64,7 +64,7 @@ func action(ctx *cli.Context) error {
}()

registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
metrics.Server(ctx, registry.(*prometheus.Registry))

// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
7
bridge/cmd/rollup_relayer/main.go
Normal file
@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/rollup_relayer/app"

func main() {
app.Run()
}
@@ -2,13 +2,15 @@
"l1_config": {
"confirmations": "0x6",
"endpoint": "DUMMY_ENDPOINT",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"l1_message_queue_address": "0x0000000000000000000000000000000000000000",
"scroll_chain_address": "0x0000000000000000000000000000000000000000",
"start_height": 0,
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://sepolia-rpc.scroll.io",
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"check_pending_time": 2,
"check_balance_time": 100,
"escalate_blocks": 100,
@@ -24,19 +26,22 @@
"min_gas_price": 0,
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
}
},
"l2_config": {
"confirmations": "0x1",
"endpoint": "https://sepolia-rpc.scroll.io",
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"l2_messenger_address": "0x0000000000000000000000000000000000000000",
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://sepolia-rpc.scroll.io",
"endpoint": "DUMMY_ENDPOINT",
"check_pending_time": 10,
"check_balance_time": 100,
"escalate_blocks": 100,
@@ -52,22 +57,14 @@
"min_gas_price": 0,
"gas_price_diff": 50000
},
"chain_monitor": {
"enabled": false,
"timeout": 3,
"try_times": 5,
"base_url": "http://localhost:8750"
},
"enable_test_env_bypass_features": true,
"finalize_batch_without_proof_timeout_sec": 7200,
"finalize_batch_interval_sec": 0,
"message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515",
"l1_commit_gas_limit_multiplier": 1.2
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
},
"chunk_proposer_config": {
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,
@@ -84,7 +81,7 @@
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://localhost/scroll?sslmode=disable",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
}
@@ -1,14 +1,12 @@
module scroll-tech/rollup
module scroll-tech/bridge

go 1.19

require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
@@ -18,20 +16,12 @@ require (
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.9.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
@@ -43,19 +33,13 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
@@ -70,17 +54,12 @@ require (
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
@@ -13,20 +13,13 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
@@ -34,28 +27,11 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
@@ -65,7 +41,6 @@ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXi
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -92,21 +67,14 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -119,19 +87,12 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -156,8 +117,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc h1:eK3NOpjgm/b2TQ6rYqWx92Zri0kBuxf6gKjjsVxWKr8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28 h1:CECBTWhZ5NGAn8lGFB4ooRAYxZns8PXoX8kTR/14c04=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
|
||||
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
@@ -170,15 +131,6 @@ github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL
|
||||
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
@@ -186,21 +138,14 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
|
||||
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
|
||||
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
|
||||
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
|
||||
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
@@ -209,9 +154,6 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -221,22 +163,15 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
@@ -251,9 +186,7 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHN
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
|
||||
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
@@ -18,7 +18,7 @@ func TestConfig(t *testing.T) {
	data, err := json.Marshal(cfg)
	assert.NoError(t, err)

	tmpJSON := fmt.Sprintf("/tmp/%d_rollup_config.json", time.Now().Nanosecond())
	tmpJSON := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
	defer func() {
		if _, err = os.Stat(tmpJSON); err == nil {
			assert.NoError(t, os.Remove(tmpJSON))
@@ -13,6 +13,8 @@ type L1Config struct {
	Endpoint string `json:"endpoint"`
	// The start height to sync event from layer 1
	StartHeight uint64 `json:"start_height"`
	// The L1ScrollMessenger contract address deployed on layer 1 chain.
	L1MessengerAddress common.Address `json:"l1_messenger_address"`
	// The L1MessageQueue contract address deployed on layer 1 chain.
	L1MessageQueueAddress common.Address `json:"l1_message_queue_address"`
	// The ScrollChain contract address deployed on layer 1 chain.
@@ -28,7 +28,6 @@ type L2Config struct {

// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
	MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
	MaxTxNumPerChunk uint64 `json:"max_tx_num_per_chunk"`
	MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
	MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
@@ -37,38 +37,28 @@ type SenderConfig struct {
	PendingLimit int `json:"pending_limit"`
}

// ChainMonitor is the config used to get batch status from the chain_monitor API.
type ChainMonitor struct {
	Enabled bool `json:"enabled"`
	TimeOut int `json:"timeout"`
	TryTimes int `json:"try_times"`
	BaseURL string `json:"base_url"`
}

// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
type RelayerConfig struct {
	// RollupContractAddress store the rollup contract address.
	RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
	// MessengerContractAddress store the scroll messenger contract address.
	MessengerContractAddress common.Address `json:"messenger_contract_address"`
	// GasPriceOracleContractAddress store the gas price oracle contract address.
	GasPriceOracleContractAddress common.Address `json:"gas_price_oracle_contract_address"`
	// sender config
	SenderConfig *SenderConfig `json:"sender_config"`
	// gas oracle config
	GasOracleConfig *GasOracleConfig `json:"gas_oracle_config"`
	// ChainMonitor config of monitoring service
	ChainMonitor *ChainMonitor `json:"chain_monitor"`
	// L1CommitGasLimitMultiplier multiplier for fallback gas limit in commitBatch txs
	L1CommitGasLimitMultiplier float64 `json:"l1_commit_gas_limit_multiplier,omitempty"`
	// The interval in which we send finalize batch transactions.
	FinalizeBatchIntervalSec uint64 `json:"finalize_batch_interval_sec"`
	// MessageRelayMinGasLimit to avoid OutOfGas error
	MessageRelayMinGasLimit uint64 `json:"message_relay_min_gas_limit,omitempty"`
	// The private key of the relayer
	MessageSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
	GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
	CommitSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
	FinalizeSenderPrivateKey *ecdsa.PrivateKey `json:"-"`

	// Indicates if bypass features specific to testing environments are enabled.
	EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
	// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
	FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
}

// GasOracleConfig The config for updating gas price oracle.
@@ -105,6 +95,7 @@ func convertAndCheck(key string, uniqueAddressesSet map[string]struct{}) (*ecdsa
func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
	var privateKeysConfig struct {
		relayerConfigAlias
		MessageSenderPrivateKey string `json:"message_sender_private_key"`
		GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
		CommitSenderPrivateKey string `json:"commit_sender_private_key"`
		FinalizeSenderPrivateKey string `json:"finalize_sender_private_key"`
@@ -118,6 +109,11 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {

	uniqueAddressesSet := make(map[string]struct{})

	r.MessageSenderPrivateKey, err = convertAndCheck(privateKeysConfig.MessageSenderPrivateKey, uniqueAddressesSet)
	if err != nil {
		return fmt.Errorf("error converting and checking message sender private key: %w", err)
	}

	r.GasOracleSenderPrivateKey, err = convertAndCheck(privateKeysConfig.GasOracleSenderPrivateKey, uniqueAddressesSet)
	if err != nil {
		return fmt.Errorf("error converting and checking gas oracle sender private key: %w", err)
@@ -141,12 +137,14 @@ func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
	privateKeysConfig := struct {
		relayerConfigAlias
		// The private key of the relayer
		MessageSenderPrivateKey string `json:"message_sender_private_key"`
		GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
		CommitSenderPrivateKey string `json:"commit_sender_private_key"`
		FinalizeSenderPrivateKey string `json:"finalize_sender_private_key"`
	}{}

	privateKeysConfig.relayerConfigAlias = relayerConfigAlias(*r)
	privateKeysConfig.MessageSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.MessageSenderPrivateKey))
	privateKeysConfig.GasOracleSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.GasOracleSenderPrivateKey))
	privateKeysConfig.CommitSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.CommitSenderPrivateKey))
	privateKeysConfig.FinalizeSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.FinalizeSenderPrivateKey))
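Editor's note: the hunks above use the alias-struct trick so that the custom (Un)MarshalJSON does not recurse into itself, with private keys traveling as hex strings. The following is a minimal self-contained sketch of that pattern, not code from the diff; demoConfig, demoAlias and the key value are hypothetical stand-ins.

package main

import (
	"crypto/ecdsa"
	"encoding/json"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// demoConfig stands in for RelayerConfig: the real key never serializes
// directly (json:"-"); a hex string represents it on the wire.
type demoConfig struct {
	Endpoint  string            `json:"endpoint"`
	SenderKey *ecdsa.PrivateKey `json:"-"`
}

// demoAlias has the same fields but no methods, so json.Unmarshal on it
// does not re-enter demoConfig.UnmarshalJSON.
type demoAlias demoConfig

func (c *demoConfig) UnmarshalJSON(input []byte) error {
	var wrapper struct {
		demoAlias
		SenderKey string `json:"sender_private_key"`
	}
	if err := json.Unmarshal(input, &wrapper); err != nil {
		return err
	}
	*c = demoConfig(wrapper.demoAlias)
	key, err := crypto.ToECDSA(common.FromHex(wrapper.SenderKey))
	if err != nil {
		return fmt.Errorf("invalid sender private key: %w", err)
	}
	c.SenderKey = key
	return nil
}

func main() {
	raw := `{"endpoint":"http://localhost:8545","sender_private_key":"1212121212121212121212121212121212121212121212121212121212121212"}`
	var cfg demoConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		log.Fatal(err)
	}
	// Print the address derived from the decoded key.
	fmt.Println(crypto.PubkeyToAddress(cfg.SenderKey.PublicKey).Hex())
}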
@@ -6,6 +6,10 @@ const (
	gasPriceDiffPrecision = 1000000

	defaultGasPriceDiff = 50000 // 5%

	defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay

	defaultL2MessageRelayMinGasLimit = 200000
)

var (
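Editor's note: a rough sketch of how these constants appear intended to compose, under the assumption (not confirmed by this diff) that the oracle only resubmits when the price moves past a relative threshold: with gasPriceDiffPrecision = 1e6, a diff of 50000 corresponds to 5%. The needsUpdate helper below is hypothetical.

package main

import "fmt"

const gasPriceDiffPrecision = 1000000

// needsUpdate reports whether newPrice deviates from lastPrice by at
// least diff/gasPriceDiffPrecision (e.g. diff=50000 means 5%).
// Integer math avoids float rounding; values stay well below uint64 overflow
// for realistic gas prices.
func needsUpdate(lastPrice, newPrice, diff uint64) bool {
	if lastPrice == 0 {
		return true
	}
	var delta uint64
	if newPrice > lastPrice {
		delta = newPrice - lastPrice
	} else {
		delta = lastPrice - newPrice
	}
	return delta*gasPriceDiffPrecision >= lastPrice*diff
}

func main() {
	fmt.Println(needsUpdate(100_000_000_000, 104_000_000_000, 50000)) // false: 4% < 5%
	fmt.Println(needsUpdate(100_000_000_000, 106_000_000_000, 50000)) // true: 6% >= 5%
}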
@@ -8,16 +8,17 @@ import (

	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"

	bridgeAbi "scroll-tech/rollup/abi"
	"scroll-tech/rollup/internal/config"
	"scroll-tech/rollup/internal/controller/sender"
	"scroll-tech/rollup/internal/orm"
	bridgeAbi "scroll-tech/bridge/abi"
	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/controller/sender"
	"scroll-tech/bridge/internal/orm"
)

// Layer1Relayer is responsible for
@@ -31,30 +32,38 @@ type Layer1Relayer struct {

	cfg *config.RelayerConfig

	// channel used to communicate with transaction sender
	messageSender *sender.Sender
	l2MessengerABI *abi.ABI

	gasOracleSender *sender.Sender
	l1GasOracleABI *abi.ABI

	minGasLimitForMessageRelay uint64

	lastGasPrice uint64
	minGasPrice uint64
	gasPriceDiff uint64

	l1BlockOrm *orm.L1Block
	metrics *l1RelayerMetrics
	l1MessageOrm *orm.L1Message
	l1BlockOrm *orm.L1Block
	metrics *l1RelayerMetrics
}

// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l1_relayer", "message_sender", reg)
	if err != nil {
		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
		return nil, fmt.Errorf("new message sender failed for address %s, err: %v", addr.Hex(), err)
	}

	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
	if err != nil {
		addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
		return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
	}

	// Ensure test features aren't enabled on the mainnet.
	if gasOracleSender.GetChainID().Cmp(big.NewInt(1)) == 0 && cfg.EnableTestEnvBypassFeatures {
		return nil, fmt.Errorf("cannot enable test env features in mainnet")
	}

	var minGasPrice uint64
	var gasPriceDiff uint64
	if cfg.GasOracleConfig != nil {
@@ -65,14 +74,25 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
		gasPriceDiff = defaultGasPriceDiff
	}

	minGasLimitForMessageRelay := uint64(defaultL1MessageRelayMinGasLimit)
	if cfg.MessageRelayMinGasLimit != 0 {
		minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
	}

	l1Relayer := &Layer1Relayer{
		cfg: cfg,
		ctx: ctx,
		l1BlockOrm: orm.NewL1Block(db),
		cfg: cfg,
		ctx: ctx,
		l1MessageOrm: orm.NewL1Message(db),
		l1BlockOrm: orm.NewL1Block(db),

		messageSender: messageSender,
		l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,

		gasOracleSender: gasOracleSender,
		l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,

		minGasLimitForMessageRelay: minGasLimitForMessageRelay,

		minGasPrice: minGasPrice,
		gasPriceDiff: gasPriceDiff,
	}
@@ -83,9 +103,57 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
	return l1Relayer, nil
}

// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer1Relayer) ProcessSavedEvents() {
	// msgs are sorted by nonce in increasing order
	msgs, err := r.l1MessageOrm.GetL1MessagesByStatus(types.MsgPending, 100)
	if err != nil {
		log.Error("Failed to fetch unprocessed L1 messages", "err", err)
		return
	}

	if len(msgs) > 0 {
		log.Info("Processing L1 messages", "count", len(msgs))
	}

	for _, msg := range msgs {
		tmpMsg := msg
		r.metrics.bridgeL1RelayedMsgsTotal.Inc()
		if err = r.processSavedEvent(&tmpMsg); err != nil {
			r.metrics.bridgeL1RelayedMsgsFailureTotal.Inc()
			if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
				log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
			}
			return
		}
	}
}

func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
	calldata := common.Hex2Bytes(msg.Calldata)
	hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay)
	if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
		return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
	}

	if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
		return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
	}
	if err != nil {
		return err
	}
	log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)

	err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
	if err != nil {
		log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
	}
	return err
}
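Editor's note: processSavedEvent above classifies deterministic revert reasons into terminal message statuses instead of retrying them. A loose, self-contained sketch of that branch structure follows; the sentinel errors and status names here are hypothetical stand-ins for the sender package's values, not code from the diff.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinels standing in for ErrExecutionRevertedMessageExpired
// and ErrExecutionRevertedAlreadySuccessExecuted in the diff.
var (
	errMessageExpired  = errors.New("execution reverted: message expired")
	errAlreadyExecuted = errors.New("execution reverted: already executed")
)

type msgStatus string

const (
	statusSubmitted  msgStatus = "submitted"
	statusExpired    msgStatus = "expired"
	statusConfirmed  msgStatus = "confirmed"
	statusRetryLater msgStatus = "retry-later"
)

// classify mirrors the branch structure of processSavedEvent: deterministic
// reverts become terminal states; anything else is retried on the next tick.
func classify(err error) msgStatus {
	switch {
	case err == nil:
		return statusSubmitted
	case errors.Is(err, errMessageExpired):
		return statusExpired
	case errors.Is(err, errAlreadyExecuted):
		return statusConfirmed
	default:
		return statusRetryLater
	}
}

func main() {
	fmt.Println(classify(nil))                        // submitted
	fmt.Println(classify(errMessageExpired))          // expired
	fmt.Println(classify(errAlreadyExecuted))         // confirmed
	fmt.Println(classify(errors.New("nonce too low"))) // retry-later
}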
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
	r.metrics.rollupL1RelayerGasPriceOraclerRunTotal.Inc()
	r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
	latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
	if err != nil {
		log.Warn("Failed to fetch latest L1 block height from db", "err", err)
@@ -130,7 +198,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
			return
		}
		r.lastGasPrice = block.BaseFee
		r.metrics.rollupL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
		r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
		log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
	}
}
@@ -141,8 +209,24 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
		select {
		case <-ctx.Done():
			return
		case cfm := <-r.messageSender.ConfirmChan():
			r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
			if !cfm.IsSuccessful {
				err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
				if err != nil {
					log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
				}
				log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
			} else {
				// @todo handle db error
				err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
				if err != nil {
					log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
				}
				log.Info("transaction confirmed in layer2", "confirmation", cfm)
			}
		case cfm := <-r.gasOracleSender.ConfirmChan():
			r.metrics.rollupL1GasOraclerConfirmedTotal.Inc()
			r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
			if !cfm.IsSuccessful {
				// @discuss: maybe make it pending again?
				err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
54 bridge/internal/controller/relayer/l1_relayer_metrics.go (new file)
@@ -0,0 +1,54 @@
package relayer

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type l1RelayerMetrics struct {
	bridgeL1RelayedMsgsTotal prometheus.Counter
	bridgeL1RelayedMsgsFailureTotal prometheus.Counter
	bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
	bridgeL1RelayerLastGasPrice prometheus.Gauge
	bridgeL1MsgsRelayedConfirmedTotal prometheus.Counter
	bridgeL1GasOraclerConfirmedTotal prometheus.Counter
}

var (
	initL1RelayerMetricOnce sync.Once
	l1RelayerMetric *l1RelayerMetrics
)

func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
	initL1RelayerMetricOnce.Do(func() {
		l1RelayerMetric = &l1RelayerMetrics{
			bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_msg_relayed_total",
				Help: "The total number of the l1 relayed message.",
			}),
			bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_msg_relayed_failure_total",
				Help: "The total number of the l1 relayed failure message.",
			}),
			bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_relayed_confirmed_total",
				Help: "The total number of layer1 relayed confirmed",
			}),
			bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_gas_price_oracler_total",
				Help: "The total number of layer1 gas price oracler run total",
			}),
			bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_layer1_gas_price_latest_gas_price",
				Help: "The latest gas price of bridge relayer l1",
			}),
			bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_gas_oracler_confirmed_total",
				Help: "The total number of layer1 relayed confirmed",
			}),
		}
	})
	return l1RelayerMetric
}
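Editor's note: the sync.Once guard in this new file exists because registering the same metric name twice on one registry panics under promauto. A minimal self-contained sketch of the same pattern (demoMetric and its counter name are hypothetical):

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	demoOnce    sync.Once
	demoCounter prometheus.Counter
)

// demoMetric returns a process-wide counter; construction is guarded with
// sync.Once so repeated calls reuse the registered instance instead of
// panicking on duplicate registration.
func demoMetric(reg prometheus.Registerer) prometheus.Counter {
	demoOnce.Do(func() {
		demoCounter = promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "demo_relayer_events_total",
			Help: "Example counter mirroring the l1RelayerMetrics pattern.",
		})
	})
	return demoCounter
}

func main() {
	reg := prometheus.NewRegistry()
	demoMetric(reg).Inc()
	// Second call returns the same counter; no re-registration occurs.
	demoMetric(reg).Inc()
	fmt.Println("ok")
}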
@@ -18,8 +18,35 @@ import (

	"scroll-tech/database/migrate"

	"scroll-tech/rollup/internal/controller/sender"
	"scroll-tech/rollup/internal/orm"
	"scroll-tech/bridge/internal/controller/sender"
	"scroll-tech/bridge/internal/orm"
)

var (
	templateL1Message = []*orm.L1Message{
		{
			QueueIndex: 1,
			MsgHash: "msg_hash1",
			Height: 1,
			Sender: "0x596a746661dbed76a84556111c2872249b070e15",
			Value: "0x19ece",
			GasLimit: 11529940,
			Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
			Calldata: "testdata",
			Layer1Hash: "hash0",
		},
		{
			QueueIndex: 2,
			MsgHash: "msg_hash2",
			Height: 2,
			Sender: "0x596a746661dbed76a84556111c2872249b070e15",
			Value: "0x19ece",
			GasLimit: 11529940,
			Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
			Calldata: "testdata",
			Layer1Hash: "hash1",
		},
	}
)

func setupL1RelayerDB(t *testing.T) *gorm.DB {
@@ -40,6 +67,61 @@ func testCreateNewL1Relayer(t *testing.T) {
	assert.NotNil(t, relayer)
}

func testL1RelayerProcessSaveEvents(t *testing.T) {
	db := setupL1RelayerDB(t)
	defer database.CloseDB(db)
	l1MessageOrm := orm.NewL1Message(db)
	l1Cfg := cfg.L1Config
	relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
	assert.NoError(t, err)
	assert.NotNil(t, relayer)
	assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
	relayer.ProcessSavedEvents()
	msg1, err := l1MessageOrm.GetL1MessageByQueueIndex(1)
	assert.NoError(t, err)
	assert.Equal(t, types.MsgStatus(msg1.Status), types.MsgSubmitted)
	msg2, err := l1MessageOrm.GetL1MessageByQueueIndex(2)
	assert.NoError(t, err)
	assert.Equal(t, types.MsgStatus(msg2.Status), types.MsgSubmitted)
}

func testL1RelayerMsgConfirm(t *testing.T) {
	db := setupL1RelayerDB(t)
	defer database.CloseDB(db)
	l1MessageOrm := orm.NewL1Message(db)
	l1Messages := []*orm.L1Message{
		{MsgHash: "msg-1", QueueIndex: 0},
		{MsgHash: "msg-2", QueueIndex: 1},
	}
	err := l1MessageOrm.SaveL1Messages(context.Background(), l1Messages)
	assert.NoError(t, err)
	// Create and set up the Layer1 Relayer.
	l1Cfg := cfg.L1Config
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
	assert.NoError(t, err)

	// Simulate message confirmations.
	l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
		ID: "msg-1",
		IsSuccessful: true,
	})
	l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
		ID: "msg-2",
		IsSuccessful: false,
	})

	// Check the database for the updated status using TryTimes.
	ok := utils.TryTimes(5, func() bool {
		msg1, err1 := l1MessageOrm.GetL1MessageByMsgHash("msg-1")
		msg2, err2 := l1MessageOrm.GetL1MessageByMsgHash("msg-2")
		return err1 == nil && types.MsgStatus(msg1.Status) == types.MsgConfirmed &&
			err2 == nil && types.MsgStatus(msg2.Status) == types.MsgRelayFailed
	})
	assert.True(t, ok)
}

func testL1RelayerGasOracleConfirm(t *testing.T) {
	db := setupL1RelayerDB(t)
	defer database.CloseDB(db)
@@ -8,7 +8,6 @@ import (
	"sync"
	"time"

	"github.com/go-resty/resty/v2"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
@@ -19,12 +18,11 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/types"
	"scroll-tech/common/utils"

	bridgeAbi "scroll-tech/rollup/abi"
	"scroll-tech/rollup/internal/config"
	"scroll-tech/rollup/internal/controller/sender"
	"scroll-tech/rollup/internal/orm"
	bridgeAbi "scroll-tech/bridge/abi"
	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/controller/sender"
	"scroll-tech/bridge/internal/orm"
)

// Layer2Relayer is responsible for
@@ -45,6 +43,9 @@ type Layer2Relayer struct {

	cfg *config.RelayerConfig

	messageSender *sender.Sender
	l1MessengerABI *abi.ABI

	commitSender *sender.Sender
	finalizeSender *sender.Sender
	l1RollupABI *abi.ABI
@@ -52,12 +53,15 @@ type Layer2Relayer struct {
	gasOracleSender *sender.Sender
	l2GasOracleABI *abi.ABI

	minGasLimitForMessageRelay uint64

	lastGasPrice uint64
	minGasPrice uint64
	gasPriceDiff uint64

	// Used to get batch status from chain_monitor api.
	chainMonitorClient *resty.Client
	// A list of processing message.
	// key(string): confirmation ID, value(string): layer2 hash.
	processingMessage sync.Map

	// A list of processing batches commitment.
	// key(string): confirmation ID, value(string): batch hash.
@@ -72,6 +76,12 @@ type Layer2Relayer struct {

// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l2_relayer", "message_sender", reg)
	if err != nil {
		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
		return nil, fmt.Errorf("new message sender failed for address %s, err: %w", addr.Hex(), err)
	}

	commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
	if err != nil {
		addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
@@ -89,11 +99,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
		return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
	}

	// Ensure test features aren't enabled on the mainnet.
	if commitSender.GetChainID().Cmp(big.NewInt(1)) == 0 && cfg.EnableTestEnvBypassFeatures {
		return nil, fmt.Errorf("cannot enable test env features in mainnet")
	}

	var minGasPrice uint64
	var gasPriceDiff uint64
	if cfg.GasOracleConfig != nil {
@@ -104,6 +109,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
		gasPriceDiff = defaultGasPriceDiff
	}

	minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
	if cfg.MessageRelayMinGasLimit != 0 {
		minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
	}

	layer2Relayer := &Layer2Relayer{
		ctx: ctx,
		db: db,
@@ -114,6 +124,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.

		l2Client: l2Client,

		messageSender: messageSender,
		l1MessengerABI: bridgeAbi.L1ScrollMessengerABI,

		commitSender: commitSender,
		finalizeSender: finalizeSender,
		l1RollupABI: bridgeAbi.ScrollChainABI,
@@ -121,21 +134,17 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
		gasOracleSender: gasOracleSender,
		l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,

		minGasLimitForMessageRelay: minGasLimitForMessageRelay,

		minGasPrice: minGasPrice,
		gasPriceDiff: gasPriceDiff,

		cfg: cfg,
		processingMessage: sync.Map{},
		processingCommitment: sync.Map{},
		processingFinalization: sync.Map{},
	}

	// chain_monitor client
	if cfg.ChainMonitor.Enabled {
		layer2Relayer.chainMonitorClient = resty.New()
		layer2Relayer.chainMonitorClient.SetRetryCount(cfg.ChainMonitor.TryTimes)
		layer2Relayer.chainMonitorClient.SetTimeout(time.Duration(cfg.ChainMonitor.TimeOut) * time.Second)
	}

	// Initialize genesis before we do anything else
	if initGenesis {
		if err := layer2Relayer.initializeGenesis(); err != nil {
@@ -266,7 +275,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,

// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
	r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
	r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
	batch, err := r.batchOrm.GetLatestBatch(r.ctx)
	if batch == nil || err != nil {
		log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
@@ -304,7 +313,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
			return
		}
		r.lastGasPrice = suggestGasPriceUint64
		r.metrics.rollupL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
		r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
		log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
	}
}
@@ -313,13 +322,13 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
	// get pending batches from database in ascending order by their index.
	batches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
	pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 1)
	if err != nil {
		log.Error("Failed to fetch pending L2 batches", "err", err)
		return
	}
	for _, batch := range batches {
		r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
	for _, batch := range pendingBatches {
		r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
		// get current header and parent header.
		currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
		if err != nil {
@@ -333,12 +342,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
			log.Error("Failed to get parent batch header", "index", batch.Index-1, "error", err)
			return
		}

		if types.RollupStatus(parentBatch.RollupStatus) == types.RollupCommitFailed {
			log.Error("Previous batch commit failed, halting further committing",
				"index", parentBatch.Index, "tx hash", parentBatch.CommitTxHash)
			return
		}
	}

	// get the chunks for the batch
@@ -382,13 +385,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {

	// send transaction
	txID := batch.Hash + "-commit"
	fallbackGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
	if types.RollupStatus(batch.RollupStatus) == types.RollupCommitFailed {
		// use eth_estimateGas if this batch previously failed to commit.
		fallbackGasLimit = 0
		log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", batch.Hash)
	}
	txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, fallbackGasLimit)
	txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
	if err != nil {
		log.Error(
			"Failed to send commitBatch tx to layer1",
@@ -413,7 +410,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
		log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
		return
	}
	r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
	r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
	r.processingCommitment.Store(txID, batch.Hash)
	log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
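Editor's note: the lines removed above computed a fallback gas limit from the precomputed commit gas, switching to 0 (meaning "let the node eth_estimateGas") after a failed commit. A self-contained sketch of that arithmetic, extracted from the removed hunk with hypothetical helper names:

package main

import "fmt"

// fallbackGasLimit mirrors the removed logic: scale the precomputed L1
// commit gas by a configured multiplier, but return 0 (deferring to
// eth_estimateGas) when the previous commit attempt failed.
func fallbackGasLimit(totalL1CommitGas uint64, multiplier float64, previousCommitFailed bool) uint64 {
	if previousCommitFailed {
		return 0
	}
	return uint64(float64(totalL1CommitGas) * multiplier)
}

func main() {
	fmt.Println(fallbackGasLimit(1_000_000, 1.2, false)) // 1200000
	fmt.Println(fallbackGasLimit(1_000_000, 1.2, true))  // 0
}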
@@ -437,30 +434,96 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
		return
	}

	r.metrics.rollupL2RelayerProcessCommittedBatchesTotal.Inc()
	r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()

	batch := batches[0]
	hash := batch.Hash
	status := types.ProvingStatus(batch.ProvingStatus)
	switch status {
	case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
		if batch.CommittedAt == nil {
			log.Error("batch.CommittedAt is nil", "index", batch.Index, "hash", batch.Hash)
		// The proof for this block is not ready yet.
		return
	case types.ProvingTaskVerified:
		log.Info("Start to roll up zk proof", "hash", hash)
		r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()

		var parentBatchStateRoot string
		if batch.Index > 0 {
			var parentBatch *orm.Batch
			parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
			// handle unexpected db error
			if err != nil {
				log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
				return
			}
			parentBatchStateRoot = parentBatch.StateRoot
		}

		aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
		if err != nil {
			log.Error("get verified proof by hash failed", "hash", hash, "err", err)
			return
		}

		if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(*batch.CommittedAt) > time.Duration(r.cfg.FinalizeBatchWithoutProofTimeoutSec)*time.Second {
			if err := r.finalizeBatch(batch, false); err != nil {
				log.Error("Failed to finalize timeout batch without proof", "index", batch.Index, "hash", batch.Hash, "err", err)
			}
		if err = aggProof.SanityCheck(); err != nil {
			log.Error("agg_proof sanity check fails", "hash", hash, "error", err)
			return
		}

	case types.ProvingTaskVerified:
		log.Info("Start to roll up zk proof", "hash", batch.Hash)
		r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
		if err := r.finalizeBatch(batch, true); err != nil {
			log.Error("Failed to finalize batch with proof", "index", batch.Index, "hash", batch.Hash, "err", err)
		data, err := r.l1RollupABI.Pack(
			"finalizeBatchWithProof",
			batch.BatchHeader,
			common.HexToHash(parentBatchStateRoot),
			common.HexToHash(batch.StateRoot),
			common.HexToHash(batch.WithdrawRoot),
			aggProof.Proof,
		)
		if err != nil {
			log.Error("Pack finalizeBatchWithProof failed", "err", err)
			return
		}

		txID := hash + "-finalize"
		// add suffix `-finalize` to avoid duplication with commit tx in unit tests
		txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
		finalizeTxHash := &txHash
		if err != nil {
			if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
				// This can happen normally if we try to finalize 2 or more
				// batches around the same time. The 2nd tx might fail since
				// the client does not see the 1st tx's updates at this point.
				// TODO: add more fine-grained error handling
				log.Error(
					"finalizeBatchWithProof in layer1 failed",
					"index", batch.Index,
					"hash", batch.Hash,
					"RollupContractAddress", r.cfg.RollupContractAddress,
					"err", err,
				)

				log.Debug(
					"finalizeBatchWithProof in layer1 failed",
					"index", batch.Index,
					"hash", batch.Hash,
					"RollupContractAddress", r.cfg.RollupContractAddress,
					"calldata", common.Bytes2Hex(data),
					"err", err,
				)
			}
			return
		}
		log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)

		// record and sync with db, @todo handle db error
		err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
		if err != nil {
			log.Error("UpdateFinalizeTxHashAndRollupStatus failed",
				"index", batch.Index, "batch hash", batch.Hash,
				"tx hash", finalizeTxHash.String(), "err", err)
		}
		r.processingFinalization.Store(txID, hash)
		r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()

	case types.ProvingTaskFailed:
		// We were unable to prove this batch. There are two possibilities:
		// (a) Prover bug. In this case, we should fix and redeploy the prover.
@@ -478,146 +541,13 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
			"ProvedAt", batch.ProvedAt,
			"ProofTimeSec", batch.ProofTimeSec,
		)
		return

	default:
		log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
	}
}

func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
	// Check batch status before sending the `finalizeBatch` tx.
	if r.cfg.ChainMonitor.Enabled {
		var batchStatus bool
		batchStatus, err := r.getBatchStatusByIndex(batch.Index)
		if err != nil {
			r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
			log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
			return err
		}
		if !batchStatus {
			r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
			log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
			return err
		}
	}

	var parentBatchStateRoot string
	if batch.Index > 0 {
		var parentBatch *orm.Batch
		parentBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
		// handle unexpected db error
		if err != nil {
			log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
			return err
		}
		parentBatchStateRoot = parentBatch.StateRoot
	}

	var txCalldata []byte
	if withProof {
		aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, batch.Hash)
		if err != nil {
			log.Error("get verified proof by hash failed", "hash", batch.Hash, "err", err)
			return err
		}

		if err = aggProof.SanityCheck(); err != nil {
			log.Error("agg_proof sanity check fails", "hash", batch.Hash, "error", err)
			return err
		}

		txCalldata, err = r.l1RollupABI.Pack(
			"finalizeBatchWithProof",
			batch.BatchHeader,
			common.HexToHash(parentBatchStateRoot),
			common.HexToHash(batch.StateRoot),
			common.HexToHash(batch.WithdrawRoot),
			aggProof.Proof,
		)
		if err != nil {
			log.Error("Pack finalizeBatchWithProof failed", "err", err)
			return err
		}
	} else {
		var err error
		txCalldata, err = r.l1RollupABI.Pack(
			"finalizeBatch",
			batch.BatchHeader,
			common.HexToHash(parentBatchStateRoot),
			common.HexToHash(batch.StateRoot),
			common.HexToHash(batch.WithdrawRoot),
		)
		if err != nil {
			log.Error("Pack finalizeBatch failed", "err", err)
			return err
		}
	}

	txID := batch.Hash + "-finalize"
	// add suffix `-finalize` to avoid duplication with commit tx in unit tests
	txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), txCalldata, 0)
	finalizeTxHash := &txHash
	if err != nil {
		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
			// This can happen normally if we try to finalize 2 or more
			// batches around the same time. The 2nd tx might fail since
			// the client does not see the 1st tx's updates at this point.
			// TODO: add more fine-grained error handling
			log.Error(
				"finalizeBatch in layer1 failed",
				"with proof", withProof,
				"index", batch.Index,
				"hash", batch.Hash,
				"RollupContractAddress", r.cfg.RollupContractAddress,
				"err", err,
			)
			log.Debug(
				"finalizeBatch in layer1 failed",
				"with proof", withProof,
				"index", batch.Index,
				"hash", batch.Hash,
				"RollupContractAddress", r.cfg.RollupContractAddress,
				"calldata", common.Bytes2Hex(txCalldata),
				"err", err,
			)
		}
		return err
	}
	log.Info("finalizeBatch in layer1", "with proof", withProof, "index", batch.Index, "batch hash", batch.Hash, "tx hash", batch.Hash)

	// record and sync with db, @todo handle db error
	if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch.Hash, finalizeTxHash.String(), types.RollupFinalizing); err != nil {
		log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "err", err)
		return err
	}
	r.processingFinalization.Store(txID, batch.Hash)
	r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
	return nil
}

// batchStatusResponse the response schema
type batchStatusResponse struct {
	ErrCode int `json:"errcode"`
	ErrMsg string `json:"errmsg"`
	Data bool `json:"data"`
}

func (r *Layer2Relayer) getBatchStatusByIndex(batchIndex uint64) (bool, error) {
	var response batchStatusResponse
	resp, err := r.chainMonitorClient.R().SetResult(&response).Get(fmt.Sprintf("%s/v1/batch_status?batch_index=%d", r.cfg.ChainMonitor.BaseURL, batchIndex))
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, resp.Error().(error)
	}
	if response.ErrCode != 0 {
		return false, fmt.Errorf("failed to get batch status, errCode: %d, errMsg: %s", response.ErrCode, response.ErrMsg)
	}

	return response.Data, nil
}
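Editor's note: getBatchStatusByIndex expects a JSON envelope of the form {"errcode":..,"errmsg":..,"data":..} from the chain_monitor endpoint /v1/batch_status?batch_index=N (both taken from the diff). Below is a self-contained sketch of that request/response round trip against a hypothetical httptest server; the fake server and the choice of batch index are illustrative assumptions.

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"

	"github.com/go-resty/resty/v2"
)

// batchStatusResponse matches the envelope shape shown in the diff.
type batchStatusResponse struct {
	ErrCode int    `json:"errcode"`
	ErrMsg  string `json:"errmsg"`
	Data    bool   `json:"data"`
}

func main() {
	// Fake chain_monitor: report batch 7 as healthy, everything else as not.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, `{"errcode":0,"errmsg":"","data":%t}`, r.URL.Query().Get("batch_index") == "7")
	}))
	defer srv.Close()

	client := resty.New()
	var response batchStatusResponse
	resp, err := client.R().SetResult(&response).Get(fmt.Sprintf("%s/v1/batch_status?batch_index=%d", srv.URL, 7))
	if err != nil || resp.IsError() {
		log.Fatal("chain_monitor call failed")
	}
	fmt.Println(response.Data) // true
}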
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
	transactionType := "Unknown"
	// check whether it is CommitBatches transaction
@@ -628,8 +558,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
			status = types.RollupCommitted
		} else {
			status = types.RollupCommitFailed
			r.metrics.rollupL2BatchesCommittedConfirmedFailedTotal.Inc()
			log.Warn("commitBatch transaction confirmed but failed in layer1", "confirmation", confirmation)
			log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
		}
		// @todo handle db error
		err := r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
@@ -638,7 +567,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
				"batch hash", batchHash.(string),
				"tx hash", confirmation.TxHash.String(), "err", err)
		}
		r.metrics.rollupL2BatchesCommittedConfirmedTotal.Inc()
		r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
		r.processingCommitment.Delete(confirmation.ID)
	}

@@ -650,8 +579,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
			status = types.RollupFinalized
		} else {
			status = types.RollupFinalizeFailed
			r.metrics.rollupL2BatchesFinalizedConfirmedFailedTotal.Inc()
			log.Warn("finalizeBatchWithProof transaction confirmed but failed in layer1", "confirmation", confirmation)
			log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
		}

		// @todo handle db error
@@ -661,7 +589,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
				"batch hash", batchHash.(string),
				"tx hash", confirmation.TxHash.String(), "err", err)
		}
		r.metrics.rollupL2BatchesFinalizedConfirmedTotal.Inc()
		r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
		r.processingFinalization.Delete(confirmation.ID)
	}
	log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
@@ -672,12 +600,14 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
		select {
		case <-ctx.Done():
			return
		case confirmation := <-r.messageSender.ConfirmChan():
			r.handleConfirmation(confirmation)
		case confirmation := <-r.commitSender.ConfirmChan():
			r.handleConfirmation(confirmation)
		case confirmation := <-r.finalizeSender.ConfirmChan():
			r.handleConfirmation(confirmation)
		case cfm := <-r.gasOracleSender.ConfirmChan():
			r.metrics.rollupL2BatchesGasOraclerConfirmedTotal.Inc()
			r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
			if !cfm.IsSuccessful {
				// @discuss: maybe make it pending again?
				err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
74 bridge/internal/controller/relayer/l2_relayer_metrics.go (new file)
@@ -0,0 +1,74 @@
package relayer

import (
"sync"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

type l2RelayerMetrics struct {
bridgeL2RelayerProcessPendingBatchTotal prometheus.Counter
bridgeL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
bridgeL2RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL2RelayerLastGasPrice prometheus.Gauge
bridgeL2RelayerProcessCommittedBatchesTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
}

var (
initL2RelayerMetricOnce sync.Once
l2RelayerMetric *l2RelayerMetrics
)

func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
initL2RelayerMetricOnce.Do(func() {
l2RelayerMetric = &l2RelayerMetrics{
bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_total",
Help: "The total number of layer2 process pending batch",
}),
bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_success_total",
Help: "The total number of layer2 process pending success batch",
}),
bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_gas_price_oracler_total",
Help: "The total number of layer2 gas price oracler run total",
}),
bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer2_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l2",
}),
bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_total",
Help: "The total number of layer2 process committed batches run total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_total",
Help: "The total number of layer2 process committed batches finalized total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_success_total",
Help: "The total number of layer2 process committed batches finalized success total",
}),
bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_confirmed_total",
Help: "The total number of layer2 process committed batches confirmed total",
}),
bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_finalized_batches_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
Help: "The total number of layer2 process gas oracler confirmed total",
}),
}
})
return l2RelayerMetric
}
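The constructor above is the usual promauto-plus-sync.Once singleton. A minimal usage sketch of why that matters (exampleSharedMetrics is hypothetical, not part of the diff):

// Because initL2RelayerMetrics is guarded by sync.Once, a second call returns
// the same *l2RelayerMetrics rather than re-registering the collectors, which
// would make promauto panic.
func exampleSharedMetrics() {
	reg := prometheus.NewRegistry()
	a := initL2RelayerMetrics(reg)
	b := initL2RelayerMetrics(reg) // same pointer as a; no duplicate registration
	if a == b {
		a.bridgeL2RelayerProcessPendingBatchTotal.Inc()
	}
}

One side effect of this pattern: a later call with a different registry is silently ignored, so the first registry passed in wins.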
@@ -4,12 +4,9 @@ import (
"context"
"errors"
"math/big"
"net/http"
"strings"
"testing"

"github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
@@ -22,8 +19,8 @@ import (

"scroll-tech/database/migrate"

"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
)

func setupL2RelayerDB(t *testing.T) *gorm.DB {
@@ -121,37 +118,6 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
assert.Equal(t, types.RollupFinalizing, statuses[0])
}

func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: chunkHash1.Hex(),
EndChunkIndex: 1,
EndChunkHash: chunkHash2.Hex(),
}
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
assert.NoError(t, err)

err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)

// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
})
assert.True(t, ok)
}
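The test polls via utils.TryTimes, whose implementation is not shown in this diff. A plausible shape for it, assuming a fixed retry count with a short sleep between attempts (the actual interval is an assumption):

// sketch: retry fn up to n times, returning true on the first success.
func TryTimes(n int, fn func() bool) bool {
	for i := 0; i < n; i++ {
		if fn() {
			return true
		}
		time.Sleep(500 * time.Millisecond) // assumed interval, not from the diff
	}
	return false
}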

func testL2RelayerCommitConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
@@ -378,13 +344,13 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {

convey.Convey("Failed to send setL2BaseFee tx to layer2", t, func() {
targetErr := errors.New("failed to send setL2BaseFee tx to layer2 error")
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
return common.Hash{}, targetErr
})
relayer.ProcessGasPriceOracle()
})

patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
return common.HexToHash("0x56789abcdef1234"), nil
})

@@ -401,34 +367,3 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
})
relayer.ProcessGasPriceOracle()
}

func mockChainMonitorServer(baseURL string) (*http.Server, error) {
router := gin.New()
r := router.Group("/v1")
r.GET("/batch_status", func(ctx *gin.Context) {
ctx.JSON(http.StatusOK, struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data bool `json:"data"`
}{
ErrCode: 0,
ErrMsg: "",
Data: true,
})
})
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
}

func testGetBatchStatusByIndex(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)

status, err := relayer.getBatchStatusByIndex(1)
assert.NoError(t, err)
assert.Equal(t, true, status)
}
@@ -13,7 +13,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"

"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/internal/config"
)

var (
@@ -86,12 +86,10 @@ func TestMain(m *testing.M) {

func TestFunctions(t *testing.T) {
setupEnv(t)
srv, err := mockChainMonitorServer(cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL)
assert.NoError(t, err)
defer srv.Close()

// Run l1 relayer test cases.
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents)
t.Run("TestL1RelayerMsgConfirm", testL1RelayerMsgConfirm)
t.Run("TestL1RelayerGasOracleConfirm", testL1RelayerGasOracleConfirm)
t.Run("TestL1RelayerProcessGasPriceOracle", testL1RelayerProcessGasPriceOracle)

@@ -99,11 +97,8 @@ func TestFunctions(t *testing.T) {
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerFinalizeTimeoutBatches", testL2RelayerFinalizeTimeoutBatches)
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
// test getBatchStatusByIndex
t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)
}
@@ -10,20 +10,17 @@ import (
"github.com/scroll-tech/go-ethereum/log"
)

func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, fallbackGasLimit uint64) (*FeeData, error) {
func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, minGasLimit uint64) (*FeeData, error) {
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
if err != nil {
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
return nil, err
}
gasLimit, err := s.estimateGasLimit(auth, contract, input, gasPrice, nil, nil, value)

gasLimit, err := s.estimateGasLimit(auth, contract, input, gasPrice, nil, nil, value, minGasLimit)
if err != nil {
log.Error("estimateLegacyGas estimateGasLimit failure", "gas price", gasPrice, "from", auth.From.Hex(),
"nonce", auth.Nonce.Uint64(), "contract address", contract.Hex(), "fallback gas limit", fallbackGasLimit, "error", err)
if fallbackGasLimit == 0 {
return nil, err
}
gasLimit = fallbackGasLimit
log.Error("estimateLegacyGas estimateGasLimit failure", "gasPrice", gasPrice, "error", err)
return nil, err
}
return &FeeData{
gasPrice: gasPrice,
@@ -31,7 +28,7 @@ func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Add
}, nil
}

func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, fallbackGasLimit uint64) (*FeeData, error) {
func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, minGasLimit uint64) (*FeeData, error) {
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
if err != nil {
log.Error("estimateDynamicGas SuggestGasTipCap failure", "error", err)
@@ -46,15 +43,10 @@ func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Ad
gasTipCap,
new(big.Int).Mul(baseFee, big.NewInt(2)),
)
gasLimit, err := s.estimateGasLimit(auth, contract, input, nil, gasTipCap, gasFeeCap, value)
gasLimit, err := s.estimateGasLimit(auth, contract, input, nil, gasTipCap, gasFeeCap, value, minGasLimit)
if err != nil {
log.Error("estimateDynamicGas estimateGasLimit failure",
"from", auth.From.Hex(), "nonce", auth.Nonce.Uint64(), "contract address", contract.Hex(),
"fallback gas limit", fallbackGasLimit, "error", err)
if fallbackGasLimit == 0 {
return nil, err
}
gasLimit = fallbackGasLimit
log.Error("estimateDynamicGas estimateGasLimit failure", "error", err)
return nil, err
}
return &FeeData{
gasLimit: gasLimit,
@@ -63,7 +55,7 @@ func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Ad
}, nil
}

func (s *Sender) estimateGasLimit(opts *bind.TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
func (s *Sender) estimateGasLimit(opts *bind.TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int, minGasLimit uint64) (uint64, error) {
msg := ethereum.CallMsg{
From: opts.From,
To: contract,
@@ -75,11 +67,14 @@ func (s *Sender) estimateGasLimit(opts *bind.TransactOpts, contract *common.Addr
}
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
if err != nil {
log.Error("estimateGasLimit EstimateGas failure", "error", err)
log.Error("estimateGasLimit EstimateGas failure", "msg", msg, "error", err)
return 0, err
}
if minGasLimit > gasLimit {
gasLimit = minGasLimit
}

gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
gasLimit = gasLimit * 15 / 10 // 50% extra gas to avoid out of gas error

return gasLimit, nil
}
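Net effect of the new estimateGasLimit: the node's estimate is floored at minGasLimit, and the safety buffer grows from 20% to 50%. A minimal standalone sketch of just that arithmetic:

// sketch of the adjustment above: floor the node's estimate at minGasLimit,
// then add 50% headroom against out-of-gas.
func adjustGasLimit(estimated, minGasLimit uint64) uint64 {
	if minGasLimit > estimated {
		estimated = minGasLimit
	}
	return estimated * 15 / 10
}

This is also why testMinGasLimit below expects tx1.Gas() to equal 150000 for a 100000 minimum: the floor applies first, then the 1.5x multiplier.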
@@ -33,59 +33,59 @@ func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
initSenderMetricOnce.Do(func() {
sm = &senderMetrics{
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_total",
Name: "bridge_sender_send_transaction_total",
Help: "The total number of sending transaction.",
}, []string{"service", "name"}),
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_send_transaction_full_tx_failure_total",
Name: "bridge_sender_send_transaction_full_tx_failure_total",
Help: "The total number of sending transaction failure for full size tx.",
}, []string{"service", "name"}),
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_repeat_transaction_failure_total",
Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
Help: "The total number of sending transaction failure for repeat transaction.",
}, []string{"service", "name"}),
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_get_fee_failure_total",
Name: "bridge_sender_send_transaction_get_fee_failure_total",
Help: "The total number of sending transaction failure for getting fee.",
}, []string{"service", "name"}),
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_send_tx_failure_total",
Name: "bridge_sender_send_transaction_send_tx_failure_total",
Help: "The total number of sending transaction failure for sending tx.",
}, []string{"service", "name"}),
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_resubmit_send_transaction_total",
Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
Help: "The total number of resubmit transaction.",
}, []string{"service", "name"}),
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_pending_tx_count",
Name: "bridge_sender_pending_tx_count",
Help: "The pending tx count in the sender.",
}, []string{"service", "name"}),
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_fee_cap",
Name: "bridge_sender_gas_fee_cap",
Help: "The gas fee of current transaction.",
}, []string{"service", "name"}),
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_tip_cap",
Name: "bridge_sender_gas_tip_cap",
Help: "The gas tip of current transaction.",
}, []string{"service", "name"}),
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_price_cap",
Name: "bridge_sender_gas_price_cap",
Help: "The gas price of current transaction.",
}, []string{"service", "name"}),
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_limit",
Name: "bridge_sender_gas_limit",
Help: "The gas limit of current transaction.",
}, []string{"service", "name"}),
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_nonce",
Name: "bridge_sender_nonce",
Help: "The nonce of current transaction.",
}, []string{"service", "name"}),
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_check_pending_transaction_total",
Name: "bridge_sender_check_pending_transaction_total",
Help: "The total number of check pending transaction.",
}, []string{"service", "name"}),
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_check_balancer_total",
Name: "bridge_sender_check_balancer_total",
Help: "The total number of check balancer.",
}, []string{"service", "name"}),
}
@@ -18,8 +18,8 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/utils"
)

const (
@@ -56,6 +56,15 @@ type FeeData struct {
gasLimit uint64
}

func newEmptyFeeData() *FeeData {
return &FeeData{
gasFeeCap: big.NewInt(0),
gasTipCap: big.NewInt(0),
gasPrice: big.NewInt(0),
gasLimit: 0,
}
}

// PendingTransaction submitted but pending transactions
type PendingTransaction struct {
submitAt uint64
@@ -85,6 +94,8 @@ type Sender struct {
stopCh chan struct{}

metrics *senderMetrics

cachedMaxFeeData *FeeData // hacky way to avoid getFeeData error
}

// NewSender returns a new instance of transaction sender
@@ -129,19 +140,20 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
}

sender := &Sender{
ctx: ctx,
config: config,
client: client,
chainID: chainID,
auth: auth,
minBalance: config.MinBalance,
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: baseFeePerGas,
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
name: name,
service: service,
ctx: ctx,
config: config,
client: client,
chainID: chainID,
auth: auth,
minBalance: config.MinBalance,
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: baseFeePerGas,
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
name: name,
service: service,
cachedMaxFeeData: newEmptyFeeData(),
}
sender.metrics = initSenderMetrics(reg)

@@ -165,11 +177,6 @@ func (s *Sender) IsFull() bool {
return s.pendingTxs.Count() >= s.config.PendingLimit
}

// GetChainID returns the chain ID associated with the sender.
func (s *Sender) GetChainID() *big.Int {
return s.chainID
}

// Stop stops the sender module.
func (s *Sender) Stop() {
close(s.stopCh)
@@ -187,15 +194,35 @@ func (s *Sender) SendConfirmation(cfm *Confirmation) {
s.confirmCh <- cfm
}

func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (*FeeData, error) {
if s.config.TxType == DynamicFeeTxType {
return s.estimateDynamicGas(auth, target, value, data, fallbackGasLimit)
return s.estimateDynamicGas(auth, target, value, data, minGasLimit)
}
return s.estimateLegacyGas(auth, target, value, data, minGasLimit)
}

func (s *Sender) cacheMaxFeeData(feeData *FeeData) {
if feeData == nil {
log.Error("cacheMaxFeeData", "err", "feeData must not be nil")
return
}

if feeData.gasFeeCap != nil && feeData.gasFeeCap.Cmp(s.cachedMaxFeeData.gasFeeCap) > 0 {
s.cachedMaxFeeData.gasFeeCap = feeData.gasFeeCap
}
if feeData.gasTipCap != nil && feeData.gasTipCap.Cmp(s.cachedMaxFeeData.gasTipCap) > 0 {
s.cachedMaxFeeData.gasTipCap = feeData.gasTipCap
}
if feeData.gasPrice != nil && feeData.gasPrice.Cmp(s.cachedMaxFeeData.gasPrice) > 0 {
s.cachedMaxFeeData.gasPrice = feeData.gasPrice
}
if feeData.gasLimit > s.cachedMaxFeeData.gasLimit {
s.cachedMaxFeeData.gasLimit = feeData.gasLimit
}
return s.estimateLegacyGas(auth, target, value, data, fallbackGasLimit)
}
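cacheMaxFeeData keeps a component-wise maximum of every fee estimate it sees, so a later estimation failure can fall back to the highest values observed (see the SendTransaction hunk below). A hypothetical helper, not in the diff, condensing that decision:

// sketch: on estimation failure, reuse the highest fee data seen so far;
// fail hard only if nothing was ever cached (gasLimit still zero).
func (s *Sender) feeDataOrCached(feeData *FeeData, err error) (*FeeData, error) {
	if err == nil {
		s.cacheMaxFeeData(feeData)
		return feeData, nil
	}
	if s.cachedMaxFeeData.gasLimit == 0 {
		return nil, err // first call, nothing cached yet
	}
	return s.cachedMaxFeeData, nil
}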

// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (common.Hash, error) {
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
if s.IsFull() {
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
@@ -220,10 +247,15 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
}
}()

if feeData, err = s.getFeeData(s.auth, target, value, data, fallbackGasLimit); err != nil {
if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to get fee data", "err", err)
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
if s.cachedMaxFeeData.gasLimit == 0 { // if no MaxFeeData cached, and getFeeData fails
return common.Hash{}, fmt.Errorf("failed to get fee data for the first time, err: %w", err)
}
feeData = s.cachedMaxFeeData
} else {
s.cacheMaxFeeData(feeData)
}

if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
@@ -416,6 +448,7 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
}

log.Debug("Transaction gas adjustment details", txInfo)
s.cacheMaxFeeData(feeData)

nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
@@ -19,7 +19,7 @@ import (

"scroll-tech/common/docker"

"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/internal/config"
)

const TXBatch = 50
@@ -59,7 +59,7 @@ func TestSender(t *testing.T) {

t.Run("test new sender", testNewSender)
t.Run("test pending limit", testPendLimit)
t.Run("test fallback gas limit", testFallbackGasLimit)
t.Run("test min gas limit", testMinGasLimit)
t.Run("test resubmit transaction", testResubmitTransaction)
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
t.Run("test check pending transaction", testCheckPendingTransaction)
@@ -102,39 +102,32 @@ func testPendLimit(t *testing.T) {
}
}

func testFallbackGasLimit(t *testing.T) {
func testMinGasLimit(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)

client, err := ethclient.Dial(cfgCopy.Endpoint)
assert.NoError(t, err)

// FallbackGasLimit = 0
txHash0, err := s.SendTransaction("0", &common.Address{}, big.NewInt(1), nil, 0)
// MinGasLimit = 0
txHash0, err := newSender.SendTransaction("0", &common.Address{}, big.NewInt(1), nil, 0)
assert.NoError(t, err)
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
assert.NoError(t, err)
assert.Greater(t, tx0.Gas(), uint64(0))

// FallbackGasLimit = 100000
patchGuard := gomonkey.ApplyPrivateMethod(s, "estimateGasLimit",
func(opts *bind.TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
return 0, errors.New("estimateGasLimit error")
},
)

txHash1, err := s.SendTransaction("1", &common.Address{}, big.NewInt(1), nil, 100000)
// MinGasLimit = 100000
txHash1, err := newSender.SendTransaction("1", &common.Address{}, big.NewInt(1), nil, 100000)
assert.NoError(t, err)
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
assert.NoError(t, err)
assert.Equal(t, uint64(100000), tx1.Gas())
assert.Equal(t, tx1.Gas(), uint64(150000))

s.Stop()
patchGuard.Reset()
newSender.Stop()
}
}

@@ -12,8 +12,8 @@ import (

"scroll-tech/common/types"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)

// BatchProposer proposes batches based on available unbatched chunks.
@@ -48,8 +48,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier)
"batchTimeoutSec", cfg.BatchTimeoutSec)

return &BatchProposer{
ctx: ctx,
@@ -64,39 +63,39 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,

batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_batch_circle_total",
Name: "bridge_propose_batch_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_batch_failure_circle_total",
Name: "bridge_propose_batch_failure_circle_total",
Help: "Total number of propose batch failure total.",
}),
proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_batch_update_info_total",
Name: "bridge_propose_batch_update_info_total",
Help: "Total number of propose batch update info total.",
}),
proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_batch_update_info_failure_total",
Name: "bridge_propose_batch_update_info_failure_total",
Help: "Total number of propose batch update info failure total.",
}),
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_total_l1_commit_gas",
Name: "bridge_propose_batch_total_l1_commit_gas",
Help: "The total l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_total_l1_call_data_size",
Name: "bridge_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_chunks_number",
Name: "bridge_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_batch_first_block_timeout_reached_total",
Name: "bridge_propose_batch_first_block_timeout_reached_total",
Help: "Total times of batch's first block timeout reached",
}),
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_batch_chunks_propose_not_enough_total",
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
}),
}
@@ -123,7 +122,7 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk, batchMeta *ty
if numChunks <= 0 {
return nil
}
chunks, err := p.dbChunksToRollupChunks(dbChunks)
chunks, err := p.dbChunksToBridgeChunks(dbChunks)
if err != nil {
return err
}
@@ -155,8 +154,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
return nil, nil, err
}

// select at most p.maxChunkNumPerBatch chunks
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch))
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch)+1)
if err != nil {
return nil, nil, err
}
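A note on the +1 in the new GetChunksGEIndex call: fetching one row beyond the cap is what lets the totalChunks > p.maxChunkNumPerBatch comparison further down distinguish "cap exceeded" from "exactly at the cap". A self-contained sketch of the idea (splitAtCap is a hypothetical helper, not in the diff):

// sketch: query maxLen+1 rows; any result longer than maxLen means the
// limit was genuinely exceeded, and only the first maxLen items are batched.
func splitAtCap(chunks []uint64, maxLen int) (batch []uint64, capExceeded bool) {
	if len(chunks) > maxLen {
		return chunks[:maxLen], true
	}
	return chunks, false
}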
@@ -177,10 +175,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
}

// Add extra gas costs
totalL1CommitGas += 100000 // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 21000 // base fee for tx
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata

// adjusting gas:
@@ -207,13 +203,12 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
// adjust batch header hash gas cost, batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas -= types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas -= types.GetMemoryExpansionCost(uint64(totalL1CommitCalldataSize))
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas += types.GetMemoryExpansionCost(uint64(totalL1CommitCalldataSize))
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
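The subtract-then-re-add pattern above re-prices the batch header hash each time a chunk adds popped L1 messages: the header is 89 bytes plus one 32-byte word per 256 messages. A sketch of the cost helpers, under the assumption that GetKeccak256Gas and CalldataNonZeroByteGas follow standard EVM pricing (30 gas plus 6 per 32-byte word for keccak; 16 gas per non-zero calldata byte) rather than the repo's exact implementation:

// sketch, under the stated pricing assumptions.
const calldataNonZeroByteGas = 16 // assumed EVM cost per non-zero calldata byte

func batchHeaderSize(l1MessagePopped uint64) uint64 {
	return 89 + 32*((l1MessagePopped+255)/256) // 89 fixed bytes + bitmap words
}

func keccak256Gas(dataLen uint64) uint64 {
	return 30 + 6*((dataLen+31)/32) // 30 base + 6 per 32-byte word
}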
@@ -239,6 +234,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
}

log.Debug("breaking limit condition in batching",
"currentTotalChunks", totalChunks,
"maxChunkNumPerBatch", p.maxChunkNumPerBatch,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
@@ -252,20 +249,12 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
}

currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec ||
totalChunks == p.maxChunkNumPerBatch {
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"start block number", dbChunks[0].StartBlockNumber,
"start block timestamp", dbChunks[0].StartBlockTime,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of chunks in batch",
"chunk count", totalChunks,
)
}

if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"start block number", dbChunks[0].StartBlockNumber,
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
batchMeta.TotalL1CommitGas = totalL1CommitGas
batchMeta.TotalL1CommitCalldataSize = totalL1CommitCalldataSize
p.batchFirstBlockTimeoutReached.Inc()
@@ -280,7 +269,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
return nil, nil, nil
}

func (p *BatchProposer) dbChunksToRollupChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
chunks := make([]*types.Chunk, len(dbChunks))
for i, c := range dbChunks {
wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
65
bridge/internal/controller/watcher/batch_proposer_test.go
Normal file
@@ -0,0 +1,65 @@
package watcher

import (
"context"
"testing"

"github.com/stretchr/testify/assert"

"scroll-tech/common/database"
"scroll-tech/common/types"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)

// TODO: Add unit tests that the limits are enforced correctly.
func testBatchProposer(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
cp.TryProposeChunk()

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()

batchOrm := orm.NewBatch(db)
// get all batches.
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, uint64(0), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

chunkOrm := orm.NewChunk(db)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, dbChunks, 1)
assert.Equal(t, batches[0].Hash, dbChunks[0].BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(dbChunks[0].ProvingStatus))

blockOrm := orm.NewL2Block(db)
blocks, err := blockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, dbChunks[0].Hash, blocks[0].ChunkHash)
assert.Equal(t, dbChunks[0].Hash, blocks[1].ChunkHash)
}
@@ -14,10 +14,14 @@ import (

"scroll-tech/common/types"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)

// maxNumBlockPerChunk is the maximum number of blocks we allow per chunk.
// Normally we will pack much fewer blocks because of other limits.
const maxNumBlockPerChunk int = 100

// chunkRowConsumption is map(sub-circuit name => sub-circuit row count)
type chunkRowConsumption map[string]uint64

@@ -51,7 +55,6 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block

maxBlockNumPerChunk uint64
maxTxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
@@ -80,15 +83,13 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier)
"chunkTimeoutSec", cfg.ChunkTimeoutSec)

return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxBlockNumPerChunk: cfg.MaxBlockNumPerChunk,
maxTxNumPerChunk: cfg.MaxTxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
@@ -97,51 +98,51 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,

chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_circle_total",
Name: "bridge_propose_chunk_circle_total",
Help: "Total number of propose chunk total.",
}),
proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_failure_circle_total",
Name: "bridge_propose_chunk_failure_circle_total",
Help: "Total number of propose chunk failure total.",
}),
proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_update_info_total",
Name: "bridge_propose_chunk_update_info_total",
Help: "Total number of propose chunk update info total.",
}),
proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_update_info_failure_total",
Name: "bridge_propose_chunk_update_info_failure_total",
Help: "Total number of propose chunk update info failure total.",
}),
chunkTxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_tx_num",
Name: "bridge_propose_chunk_tx_num",
Help: "The chunk tx num",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_l1_commit_gas",
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
Help: "The chunk estimate l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_total_l1_commit_call_data_size",
Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_total_tx_gas_used",
Name: "bridge_propose_chunk_total_tx_gas_used",
Help: "The total tx gas used",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_max_tx_consumption",
Name: "bridge_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
}),
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_chunk_block_number",
Name: "bridge_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_first_block_timeout_reached_total",
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_blocks_propose_not_enough_total",
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
}
@@ -190,8 +191,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, err
}

// select at most p.maxBlockNumPerChunk blocks
blocks, err := p.l2BlockOrm.GetL2WrappedBlocksGEHeight(p.ctx, unchunkedBlockHeight, int(p.maxBlockNumPerChunk))
blocks, err := p.l2BlockOrm.GetL2WrappedBlocksGEHeight(p.ctx, unchunkedBlockHeight, maxNumBlockPerChunk)
if err != nil {
return nil, err
}
@@ -293,21 +293,12 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
}

currentTimeSec := uint64(time.Now().Unix())
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec ||
uint64(len(chunk.Blocks)) == p.maxBlockNumPerChunk {
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"block number", chunk.Blocks[0].Header.Number,
"block timestamp", chunk.Blocks[0].Header.Time,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of blocks in chunk",
"start block number", chunk.Blocks[0].Header.Number,
"block count", len(chunk.Blocks),
)
}

if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"block number", blocks[0].Header.Number,
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkTxNum.Set(float64(totalTxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
68
bridge/internal/controller/watcher/chunk_proposer_test.go
Normal file
@@ -0,0 +1,68 @@
package watcher

import (
"context"
"testing"

"github.com/stretchr/testify/assert"

"scroll-tech/common/database"
"scroll-tech/common/types"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)

// TODO: Add unit tests that the limits are enforced correctly.
func testChunkProposer(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
cp.TryProposeChunk()

expectedChunk := &types.Chunk{
Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
}
expectedHash, err := expectedChunk.Hash(0)
assert.NoError(t, err)

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
}

func testChunkProposerRowConsumption(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db, nil)
cp.TryProposeChunk()

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 0)
}
@@ -2,7 +2,6 @@ package watcher

import (
"context"
"errors"
"math/big"

"github.com/prometheus/client_golang/prometheus"
@@ -18,9 +17,9 @@ import (

"scroll-tech/common/types"

bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
)

type rollupEvent struct {
@@ -40,6 +39,9 @@ type L1WatcherClient struct {
// The number of new blocks to wait for a block to be confirmed
confirmations rpc.BlockNumber

messengerAddress common.Address
messengerABI *abi.ABI

messageQueueAddress common.Address
messageQueueABI *abi.ABI

@@ -55,7 +57,7 @@ type L1WatcherClient struct {
}

// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
if err != nil {
@@ -84,6 +86,9 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
batchOrm: orm.NewBatch(db),
confirmations: confirmations,

messengerAddress: messengerAddress,
messengerABI: bridgeAbi.L1ScrollMessengerABI,

messageQueueAddress: messageQueueAddress,
messageQueueABI: bridgeAbi.L1MessageQueueABI,

@@ -117,39 +122,52 @@ func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
// FetchBlockHeader pulls latest L1 blocks and saves them in DB
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
w.metrics.l1WatcherFetchBlockHeaderTotal.Inc()
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
return nil
}
if toBlock > fromBlock+contractEventsBlocksFetchLimit {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
}

var block *gethTypes.Header
block, err := w.client.HeaderByNumber(w.ctx, big.NewInt(int64(blockHeight)))
if err != nil {
log.Warn("Failed to get block", "height", blockHeight, "err", err)
var blocks []orm.L1Block
var err error
height := fromBlock
for ; height <= toBlock; height++ {
var block *gethTypes.Header
block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height))
if err != nil {
log.Warn("Failed to get block", "height", height, "err", err)
break
}
var baseFee uint64
if block.BaseFee != nil {
baseFee = block.BaseFee.Uint64()
}
blocks = append(blocks, orm.L1Block{
Number: uint64(height),
Hash: block.Hash().String(),
BaseFee: baseFee,
GasOracleStatus: int16(types.GasOraclePending),
})
}

// failed at first block, return with the error
if height == fromBlock {
return err
}
toBlock = height - 1

if block == nil {
log.Warn("Received nil block", "height", blockHeight)
return errors.New("received nil block")
}

var baseFee uint64
if block.BaseFee != nil {
baseFee = block.BaseFee.Uint64()
}

l1Block := orm.L1Block{
Number: blockHeight,
Hash: block.Hash().String(),
BaseFee: baseFee,
GasOracleStatus: int16(types.GasOraclePending),
}

err = w.l1BlockOrm.InsertL1Blocks(w.ctx, []orm.L1Block{l1Block})
// insert successfully fetched blocks
err = w.l1BlockOrm.InsertL1Blocks(w.ctx, blocks)
if err != nil {
log.Warn("Failed to insert L1 block to db", "blockHeight", blockHeight, "err", err)
log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err)
return err
}

// update processed height
w.processedBlockHeight = blockHeight
w.processedBlockHeight = uint64(toBlock)
w.metrics.l1WatcherFetchBlockHeaderProcessedBlockHeight.Set(float64(w.processedBlockHeight))
return nil
}
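The rewritten FetchBlockHeader walks a capped block range and keeps partial progress on RPC failure: only an error on the very first block is returned, otherwise whatever was fetched is inserted and the processed height advances to the last good block. A self-contained sketch of that contract (fetchRange is hypothetical):

// sketch of the partial-progress behavior above: on an error mid-range,
// keep what was fetched and advance only that far; surface the error only
// when the very first block already fails.
func fetchRange(from, to int64, fetch func(int64) (uint64, error)) (fees []uint64, newHeight int64, err error) {
	h := from
	for ; h <= to; h++ {
		fee, e := fetch(h)
		if e != nil {
			if h == from {
				return nil, from - 1, e
			}
			break
		}
		fees = append(fees, fee)
	}
	return fees, h - 1, nil
}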
@@ -181,15 +199,18 @@ func (w *L1WatcherClient) FetchContractEvent() error {
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.scrollChainAddress,
w.messageQueueAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = bridgeAbi.L1QueueTransactionEventSignature
query.Topics[0][1] = bridgeAbi.L1CommitBatchEventSignature
query.Topics[0][2] = bridgeAbi.L1FinalizeBatchEventSignature
query.Topics[0][1] = bridgeAbi.L1RelayedMessageEventSignature
query.Topics[0][2] = bridgeAbi.L1FailedRelayedMessageEventSignature
query.Topics[0][3] = bridgeAbi.L1CommitBatchEventSignature
query.Topics[0][4] = bridgeAbi.L1FinalizeBatchEventSignature

logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
@@ -26,31 +26,31 @@ func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
initL1WatcherMetricOnce.Do(func() {
l1WatcherMetric = &l1WatcherMetrics{
l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_header_total",
Name: "bridge_l1_watcher_fetch_block_header_total",
Help: "The total number of l1 watcher fetch block header total",
}),
l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l1_watcher_fetch_block_header_processed_block_height",
Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
Help: "The current processed block height of l1 watcher fetch block header",
}),
l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_total",
Name: "bridge_l1_watcher_fetch_block_contract_event_total",
Help: "The total number of l1 watcher fetch contract event total",
}),
l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_success_total",
Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
Help: "The total number of l1 watcher fetch contract event success total",
}),
l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_processed_block_height",
Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
Help: "The current processed block height of l1 watcher fetch contract event",
}),
l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_sent_event_total",
Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
Help: "The total number of l1 watcher fetch contract sent events",
}),
l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_rollup_event_total",
Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
Help: "The total number of l1 watcher fetch contract rollup events",
}),
}
@@ -20,9 +20,9 @@ import (
"scroll-tech/common/database"
commonTypes "scroll-tech/common/types"

bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
)

func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
@@ -30,7 +30,8 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
assert.NoError(t, watcher.FetchContractEvent())
return watcher, db
}
@@ -19,9 +19,9 @@ import (

"scroll-tech/common/types"

bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
)

// L2WatcherClient provides APIs which allow others to subscribe to various events from l2geth
|
||||
@@ -116,7 +116,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
return
|
||||
}
|
||||
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
|
||||
w.metrics.rollupL2BlocksFetchedGap.Set(float64(blockHeight - to))
|
||||
w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -180,9 +180,6 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
}

if len(blocks) > 0 {
for _, block := range blocks {
w.metrics.rollupL2BlockL1CommitCalldataSize.Set(float64(block.EstimateL1CommitCalldataSize()))
}
if err := w.l2BlockOrm.InsertL2Blocks(w.ctx, blocks); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
@@ -249,7 +246,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}

relayedMessageCount := int64(len(relayedMessageEvents))
w.metrics.rollupL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)

// Update relayed message first to make sure we don't forget to update submitted message.
@@ -8,13 +8,12 @@ import (
)

type l2WatcherMetrics struct {
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
fetchContractEventTotal prometheus.Counter
fetchContractEventHeight prometheus.Gauge
rollupL2MsgsRelayedEventsTotal prometheus.Counter
rollupL2BlocksFetchedGap prometheus.Gauge
rollupL2BlockL1CommitCalldataSize prometheus.Gauge
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
fetchContractEventTotal prometheus.Counter
fetchContractEventHeight prometheus.Gauge
bridgeL2MsgsRelayedEventsTotal prometheus.Counter
bridgeL2BlocksFetchedGap prometheus.Gauge
}

var (
@@ -26,33 +25,29 @@ func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
initL2WatcherMetricOnce.Do(func() {
l2WatcherMetric = &l2WatcherMetrics{
fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_fetch_running_missing_blocks_total",
Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
Help: "The total number of missing blocks fetched by the l2 watcher",
}),
fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_fetch_running_missing_blocks_height",
Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
Help: "The current height up to which the l2 watcher has fetched missing blocks",
}),
fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_fetch_contract_events_total",
Name: "bridge_l2_watcher_fetch_contract_events_total",
Help: "The total number of contract events fetched by the l2 watcher",
}),
fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_fetch_contract_height",
Name: "bridge_l2_watcher_fetch_contract_height",
Help: "The current block height of l2 watcher contract event fetching",
}),
rollupL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_msg_relayed_events_total",
bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_msg_relayed_events_total",
Help: "The total number of msg relayed events fetched by the l2 watcher",
}),
rollupL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_blocks_fetched_gap",
bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_blocks_fetched_gap",
Help: "The gap between the chain head and the latest fetched l2 block",
}),
rollupL2BlockL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_block_l1_commit_calldata_size",
Help: "The l1 commitBatch calldata size of the l2 block",
}),
}
})
return l2WatcherMetric
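
For readers unfamiliar with the initL2WatcherMetrics shape above: the package guards registration behind a sync.Once so that repeated constructor calls share one metrics struct and never register the same collector twice (promauto panics on duplicate registration). A condensed, hypothetical sketch of the same pattern:

package metrics

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type watcherMetrics struct {
	blocksFetchedGap prometheus.Gauge
}

var (
	initOnce sync.Once
	shared   *watcherMetrics
)

// initWatcherMetrics is safe to call from every watcher instance:
// only the first call registers collectors against reg.
func initWatcherMetrics(reg prometheus.Registerer) *watcherMetrics {
	initOnce.Do(func() {
		shared = &watcherMetrics{
			blocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_l2_watcher_blocks_fetched_gap",
				Help: "The gap between the chain head and the latest fetched l2 block",
			}),
		}
	})
	return shared
}
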
@@ -24,11 +24,11 @@ import (
"scroll-tech/common/database"
cutils "scroll-tech/common/utils"

bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
"scroll-tech/rollup/mock_bridge"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge"
)

func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
@@ -51,7 +51,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {

l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.GasOracleSenderPrivateKey, "test", "test", nil)
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey, "test", "test", nil)
assert.NoError(t, err)

// Create several transactions and commit to block
@@ -72,7 +72,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t)
defer database.CloseDB(db)

auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.GasOracleSenderPrivateKey)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKey)

// deploy mock bridge
_, tx, _, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
@@ -15,7 +15,7 @@ import (

"scroll-tech/database/migrate"

"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/internal/config"
)

var (
@@ -110,10 +110,10 @@ func TestFunction(t *testing.T) {
t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)

// Run chunk proposer test cases.
t.Run("TestChunkProposerLimits", testChunkProposerLimits)
// Run chunk-proposer test cases.
t.Run("TestChunkProposer", testChunkProposer)
t.Run("TestChunkProposerRowConsumption", testChunkProposerRowConsumption)

// Run chunk proposer test cases.
t.Run("TestBatchProposerLimits", testBatchProposerLimits)
t.Run("TestBatchCommitGasAndCalldataSizeEstimation", testBatchCommitGasAndCalldataSizeEstimation)
// Run batch-proposer test cases.
t.Run("TestBatchProposer", testBatchProposer)
}
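
The TestFunction hunk relies on Go's subtest mechanism: t.Run groups related cases under one parent test so they share setup and can be filtered with -run. A minimal illustration (test names hypothetical):

package watcher

import "testing"

func TestFunction(t *testing.T) {
	// Shared setup would run once here, before any subtest.

	// Run chunk-proposer test cases; selectable via:
	//   go test -run 'TestFunction/TestChunkProposer'
	t.Run("TestChunkProposer", func(t *testing.T) { /* ... */ })

	// Run batch-proposer test cases.
	t.Run("TestBatchProposer", func(t *testing.T) { /* ... */ })
}
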
@@ -9,7 +9,6 @@ import (

"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
@@ -193,22 +192,22 @@ func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string)
return statuses, nil
}

// GetFailedAndPendingBatches retrieves batches with failed or pending status up to the specified limit.
// GetPendingBatches retrieves pending batches up to the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetFailedAndPendingBatches(ctx context.Context, limit int) ([]*Batch, error) {
func (o *Batch) GetPendingBatches(ctx context.Context, limit int) ([]*Batch, error) {
if limit <= 0 {
return nil, errors.New("limit must be greater than zero")
}

db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ? OR rollup_status = ?", types.RollupCommitFailed, types.RollupPending)
db = db.Where("rollup_status = ?", types.RollupPending)
db = db.Order("index ASC")
db = db.Limit(limit)

var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetFailedAndPendingBatches error: %w", err)
return nil, fmt.Errorf("Batch.GetPendingBatches error: %w", err)
}
return batches, nil
}
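
The GetPendingBatches body above uses GORM's builder style: each db = db.Where(...)/Order(...)/Limit(...) call accumulates clauses, and no SQL runs until Find. A self-contained sketch of the same pattern (model fields and status value hypothetical, not the repo's exact types):

package orm

import (
	"context"
	"fmt"

	"gorm.io/gorm"
)

type Batch struct {
	Index        uint64
	RollupStatus int
}

const rollupPending = 1 // placeholder for types.RollupPending

// pendingBatches mirrors the reverted query: pending-only, ascending
// by index, capped at limit. The query executes only at Find.
func pendingBatches(ctx context.Context, gdb *gorm.DB, limit int) ([]*Batch, error) {
	db := gdb.WithContext(ctx).
		Model(&Batch{}).
		Where("rollup_status = ?", rollupPending).
		Order("index ASC").
		Limit(limit)

	var batches []*Batch
	if err := db.Find(&batches).Error; err != nil {
		return nil, fmt.Errorf("pendingBatches error: %w", err)
	}
	return batches, nil
}
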
@@ -356,9 +355,9 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type

switch status {
case types.RollupCommitted:
updateFields["committed_at"] = utils.NowUTC()
updateFields["committed_at"] = time.Now()
case types.RollupFinalized:
updateFields["finalized_at"] = utils.NowUTC()
updateFields["finalized_at"] = time.Now()
}

db := o.db
@@ -381,7 +380,7 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
updateFields["commit_tx_hash"] = commitTxHash
updateFields["rollup_status"] = int(status)
if status == types.RollupCommitted {
updateFields["committed_at"] = utils.NowUTC()
updateFields["committed_at"] = time.Now()
}

db := o.db.WithContext(ctx)
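
The swap from utils.NowUTC() to time.Now() in these hunks is behavioral, not cosmetic: time.Now() carries the server's local zone, so the stored committed_at/finalized_at wall-clock values depend on where the process runs. Assuming utils.NowUTC is the obvious helper (its exact body is not shown in this diff), the difference looks like:

package main

import (
	"fmt"
	"time"
)

// NowUTC is presumably what the common/bridge utils package provides:
// the current instant, normalized to UTC.
func NowUTC() time.Time {
	return time.Now().UTC()
}

func main() {
	// Same instant, different zone metadata; a TIMESTAMP WITHOUT TIME ZONE
	// column stores whichever wall-clock string the driver renders.
	fmt.Println(time.Now()) // e.g. 2023-08-01 09:00:00 +0800 CST
	fmt.Println(NowUTC())   // e.g. 2023-08-01 01:00:00 +0000 UTC
}
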
@@ -70,41 +70,19 @@ func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}
return l1Blocks, nil
}

// InsertL1Blocks batch inserts l1 blocks.
// If there's a block number conflict (e.g., due to reorg), soft deletes the existing block and inserts the new one.
// InsertL1Blocks batch inserts l1 blocks
func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
if len(blocks) == 0 {
return nil
}

return o.db.Transaction(func(tx *gorm.DB) error {
minBlockNumber := blocks[0].Number
for _, block := range blocks[1:] {
if block.Number < minBlockNumber {
minBlockNumber = block.Number
}
}
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})

db := tx.WithContext(ctx)
db = db.Model(&L1Block{})
db = db.Where("number >= ?", minBlockNumber)
result := db.Delete(&L1Block{})

if result.Error != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: soft deleting blocks failed, block numbers starting from: %v, error: %w", minBlockNumber, result.Error)
}

// If the number of deleted blocks exceeds the limit (input length + 64), treat it as an anomaly,
// because a reorg of >= 64 blocks is very unlikely to happen.
if result.RowsAffected >= int64(len(blocks)+64) {
return fmt.Errorf("L1Block.InsertL1Blocks error: too many blocks were deleted, count: %d", result.RowsAffected)
}

if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
}
return nil
})
if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
}
return nil
}
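
What the revert drops here is the reorg guard: on an L1 reorg the watcher can re-deliver block numbers it already stored, so the removed code deleted everything from the lowest incoming number upward (with a sanity cap) before inserting, all inside one transaction. A condensed, hedged restatement of that removed logic, with a simplified model (the original uses soft deletes via a deleted_at column):

package orm

import (
	"context"
	"fmt"

	"gorm.io/gorm"
)

type L1Block struct {
	Number uint64
	Hash   string
}

// reorgSafeInsert condenses the removed logic: delete any stored blocks
// at or above the lowest incoming number, then insert, atomically.
func reorgSafeInsert(ctx context.Context, gdb *gorm.DB, blocks []L1Block) error {
	if len(blocks) == 0 {
		return nil
	}
	return gdb.Transaction(func(tx *gorm.DB) error {
		minNumber := blocks[0].Number
		for _, b := range blocks[1:] {
			if b.Number < minNumber {
				minNumber = b.Number
			}
		}
		res := tx.WithContext(ctx).Where("number >= ?", minNumber).Delete(&L1Block{})
		if res.Error != nil {
			return fmt.Errorf("delete from %d failed: %w", minNumber, res.Error)
		}
		// Deleting far more rows than we insert would imply a reorg deeper
		// than ~64 blocks, which is treated as an anomaly.
		if res.RowsAffected >= int64(len(blocks)+64) {
			return fmt.Errorf("too many blocks deleted: %d", res.RowsAffected)
		}
		return tx.WithContext(ctx).Create(&blocks).Error
	})
}
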
// UpdateL1GasOracleStatusAndOracleTxHash updates l1 gas oracle status and oracle tx hash
@@ -89,53 +89,6 @@ func tearDownEnv(t *testing.T) {
base.Free()
}

func TestL1BlockOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

l1BlockOrm := NewL1Block(db)

// mock blocks
block1 := L1Block{Number: 1, Hash: "hash1"}
block2 := L1Block{Number: 2, Hash: "hash2"}
block3 := L1Block{Number: 3, Hash: "hash3"}
block2AfterReorg := L1Block{Number: 2, Hash: "hash2-reorg"}

err = l1BlockOrm.InsertL1Blocks(context.Background(), []L1Block{block1, block2, block3})
assert.NoError(t, err)

height, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(3), height)

blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{})
assert.NoError(t, err)
assert.Len(t, blocks, 3)
assert.Equal(t, "hash1", blocks[0].Hash)
assert.Equal(t, "hash2", blocks[1].Hash)
assert.Equal(t, "hash3", blocks[2].Hash)

// reorg handling: insert another block with same height and different hash
err = l1BlockOrm.InsertL1Blocks(context.Background(), []L1Block{block2AfterReorg})
assert.NoError(t, err)

blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{})
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, "hash1", blocks[0].Hash)
assert.Equal(t, "hash2-reorg", blocks[1].Hash)

err = l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(context.Background(), "hash1", types.GasOracleImported, "txhash1")
assert.NoError(t, err)

updatedBlocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{})
assert.NoError(t, err)
assert.Len(t, updatedBlocks, 2)
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBlocks[0].GasOracleStatus))
assert.Equal(t, "txhash1", updatedBlocks[0].OracleTxHash)
}

func TestL2BlockOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -258,17 +211,14 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)

err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitFailed)
assert.NoError(t, err)

pendingBatches, err := batchOrm.GetFailedAndPendingBatches(context.Background(), 100)
pendingBatches, err := batchOrm.GetPendingBatches(context.Background(), 100)
assert.NoError(t, err)
assert.Equal(t, 2, len(pendingBatches))

rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
assert.NoError(t, err)
assert.Equal(t, 2, len(rollupStatus))
assert.Equal(t, types.RollupCommitFailed, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1])

err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
@@ -9,7 +9,7 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"

bridgeAbi "scroll-tech/rollup/abi"
bridgeAbi "scroll-tech/bridge/abi"
)

// Keccak2 computes the keccak256 of the concatenation of two bytes32 values
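
The Keccak2 named in that comment is, by all appearances, the Merkle-style pairing hash: keccak256 over the 64-byte concatenation of two bytes32 values. A plausible implementation consistent with the comment and the file's crypto import (a sketch, not verified against the repo):

package utils

import (
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// Keccak2 computes keccak256(a || b) for two bytes32 values.
func Keccak2(a, b common.Hash) common.Hash {
	return common.BytesToHash(crypto.Keccak256(append(a.Bytes(), b.Bytes()...)))
}
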
Some files were not shown because too many files have changed in this diff.