Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 23:48:15 -05:00)

Compare commits: v4.2.17...revert_mon (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e34cfbbd49 | |
| | cab8d4a903 | |
| | fc8ea35ad0 | |
2 .github/pull_request_template.md (vendored)

@@ -20,7 +20,7 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits
### Deployment tag versioning
Has `tag` in `common/version.go` been updated or have you added `bump-version` label to this PR?
Has `tag` in `common/version.go` been updated?
- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
- [ ] Yes
@@ -1,4 +1,4 @@
name: Rollup
name: Bridge
on:
push:
@@ -8,11 +8,10 @@ on:
- develop
- alpha
paths:
- 'rollup/**'
- 'bridge/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/rollup.yml'
- '.github/workflows/bridge.yml'
pull_request:
types:
- opened
@@ -20,11 +19,10 @@ on:
- synchronize
- ready_for_review
paths:
- 'rollup/**'
- 'bridge/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/rollup.yml'
- '.github/workflows/bridge.yml'
jobs:
check:
@@ -46,7 +44,7 @@ jobs:
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
working-directory: 'rollup'
working-directory: 'bridge'
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
@@ -64,14 +62,14 @@ jobs:
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
run: goimports -local scroll-tech/rollup/ -w .
working-directory: 'rollup'
run: goimports -local scroll-tech/bridge/ -w .
working-directory: 'bridge'
- name: Run go mod tidy
run: go mod tidy
working-directory: 'rollup'
working-directory: 'bridge'
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'rollup'
working-directory: 'bridge'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
@@ -97,13 +95,13 @@ jobs:
- name: Build prerequisites
run: |
make dev_docker
make -C rollup mock_abi
- name: Build rollup binaries
working-directory: 'rollup'
make -C bridge mock_abi
- name: Build bridge binaries
working-directory: 'bridge'
run: |
make rollup_bins
- name: Test rollup packages
working-directory: 'rollup'
make bridge_bins
- name: Test bridge packages
working-directory: 'bridge'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
@@ -111,7 +109,7 @@ jobs:
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: rollup
flags: bridge
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
4 .github/workflows/bump_version.yml (vendored)

@@ -2,17 +2,15 @@ name: Bump Version
on:
pull_request:
branches: [ develop ]
branches: [develop]
types:
- opened
- reopened
- synchronize
- ready_for_review
- labeled
jobs:
try-to-bump:
if: contains(github.event.pull_request.labels.*.name, 'bump-version')
runs-on: ubuntu-latest
steps:
- name: Checkout code
2 .github/workflows/common.yml (vendored)

@@ -9,7 +9,6 @@ on:
- alpha
paths:
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/common.yml'
pull_request:
types:
@@ -19,7 +18,6 @@ on:
- ready_for_review
paths:
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/common.yml'
jobs:
2 .github/workflows/coordinator.yml (vendored)

@@ -10,7 +10,6 @@ on:
paths:
- 'coordinator/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/coordinator.yml'
pull_request:
@@ -22,7 +21,6 @@ on:
paths:
- 'coordinator/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/coordinator.yml'
2 .github/workflows/database.yml (vendored)

@@ -10,7 +10,6 @@ on:
paths:
- 'database/**'
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/database.yml'
pull_request:
types:
@@ -21,7 +20,6 @@ on:
paths:
- 'database/**'
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/database.yml'
jobs:
21 .github/workflows/docker.yaml (vendored)

@@ -48,6 +48,27 @@ jobs:
tags: scrolltech/gas-oracle:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
msg_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push msg_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/msg_relayer.Dockerfile
push: true
tags: scrolltech/msg-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
rollup_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
2 .github/workflows/integration.yaml (vendored)

@@ -36,7 +36,7 @@ jobs:
- name: Build prerequisites
run: |
make dev_docker
make -C rollup mock_abi
make -C bridge mock_abi
make -C common/bytecode all
- name: Run integration tests
run: |
8 Makefile

@@ -1,6 +1,6 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean
L2GETH_TAG=scroll-v4.3.55
L2GETH_TAG=scroll-v4.3.34
help: ## Display this help message
@grep -h \
@@ -8,7 +8,7 @@ help: ## Display this help message
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
lint: ## The code's format and security checks.
make -C rollup lint
make -C bridge lint
make -C common lint
make -C coordinator lint
make -C database lint
@@ -17,7 +17,7 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
@@ -25,7 +25,7 @@ update: ## update dependencies
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
@@ -15,14 +15,14 @@ docker pull postgres
make dev_docker
```

## Testing Rollup & Coordinator
## Testing Bridge & Coordinator

### For Non-Apple Silicon (M1/M2) Macs

Run the tests using the following commands:

```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -55,7 +55,7 @@ This command runs a Docker container named `scroll_test_container` from the `scr
Once the Docker container is running, execute the tests using the following commands:

```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -89,6 +89,9 @@ var (
// L2FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
L2FailedRelayedMessageEventSignature common.Hash
// L2ImportBlockEventSignature = keccak256("ImportBlock(bytes32,uint256,uint256,uint256,bytes32)")
L2ImportBlockEventSignature common.Hash
// L2AppendMessageEventSignature = keccak256("AppendMessage(uint256,bytes32)")
L2AppendMessageEventSignature common.Hash
)
@@ -150,6 +153,8 @@ func init() {
L2RelayedMessageEventSignature = L2ScrollMessengerABI.Events["RelayedMessage"].ID
L2FailedRelayedMessageEventSignature = L2ScrollMessengerABI.Events["FailedRelayedMessage"].ID
L2ImportBlockEventSignature = L1BlockContainerABI.Events["ImportBlock"].ID
L2AppendMessageEventSignature = L2MessageQueueABI.Events["AppendMessage"].ID
}
@@ -116,7 +116,7 @@ require (
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
@@ -531,8 +531,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
0 rollup/.gitignore → bridge/.gitignore (vendored)

@@ -1,15 +1,17 @@
.PHONY: mock_abi rollup_bins event_watcher gas_oracle rollup_relayer test lint clean docker
.PHONY: lint docker clean bridge
IMAGE_NAME=bridge
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
mock_abi:
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./rollup/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./rollup/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./rollup/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./rollup/mock_bridge/MockBridgeL2.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL2.go
rollup_bins: ## Builds the Rollup bins.
bridge_bins: ## Builds the Bridge bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
event_watcher: ## Builds the event_watcher bin
@@ -18,6 +20,9 @@ event_watcher: ## Builds the event_watcher bin
gas_oracle: ## Builds the gas_oracle bin
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
message_relayer: ## Builds the message_relayer bin
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
rollup_relayer: ## Builds the rollup_relayer bin
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
@@ -34,8 +39,10 @@ docker_push:
docker docker push scrolltech/gas-oracle:${IMAGE_VERSION}
docker docker push scrolltech/event-watcher:${IMAGE_VERSION}
docker docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
docker docker push scrolltech/msg-relayer:${IMAGE_VERSION}
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/event-watcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/event_watcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/rollup-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/rollup_relayer.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/msg-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/msg_relayer.Dockerfile
36 bridge/README.md (new file)

@@ -0,0 +1,36 @@
# Bridge

This repo contains the Scroll bridge.

In addition, launching the bridge will launch a separate instance of l2geth and set up a communication channel
between the two over JSON-RPC sockets.

Note that the private keys used inside a sender instance must not be duplicated.

## Dependency

+ install `abigen`

```bash
go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
```

## Build

```bash
make clean
make bridge
```

## Start
* use default ports and config.json

```bash
./build/bin/bridge --http
```

* use specified ports and config.json

```bash
./build/bin/bridge --config ./config.json --http --http.addr localhost --http.port 8290
```
@@ -11,6 +11,8 @@ import (
var (
// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
ScrollChainABI *abi.ABI
// L1ScrollMessengerABI holds information about L1ScrollMessenger's context and available invokable methods.
L1ScrollMessengerABI *abi.ABI
// L1MessageQueueABI holds information about L1MessageQueue contract's context and available invokable methods.
L1MessageQueueABI *abi.ABI
// L2GasPriceOracleABI holds information about L2GasPriceOracle's context and available invokable methods.
@@ -18,15 +20,25 @@ var (
// L2ScrollMessengerABI holds information about L2ScrollMessenger's context and available invokable methods.
L2ScrollMessengerABI *abi.ABI
// L1BlockContainerABI holds information about L1BlockContainer contract's context and available invokable methods.
L1BlockContainerABI *abi.ABI
// L1GasPriceOracleABI holds information about L1GasPriceOracle's context and available invokable methods.
L1GasPriceOracleABI *abi.ABI
// L2MessageQueueABI holds information about L2MessageQueue contract's context and available invokable methods.
L2MessageQueueABI *abi.ABI
// L1SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes)")
L1SentMessageEventSignature common.Hash
// L1RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
L1RelayedMessageEventSignature common.Hash
// L1FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
L1FailedRelayedMessageEventSignature common.Hash
// L1CommitBatchEventSignature = keccak256("CommitBatch(uint256,bytes32)")
L1CommitBatchEventSignature common.Hash
// L1FinalizeBatchEventSignature = keccak256("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")
L1FinalizeBatchEventSignature common.Hash
// L1QueueTransactionEventSignature = keccak256("QueueTransaction(address,address,uint256,uint64,uint256,bytes)")
L1QueueTransactionEventSignature common.Hash
@@ -37,19 +49,28 @@ var (
// L2FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
L2FailedRelayedMessageEventSignature common.Hash
// L2ImportBlockEventSignature = keccak256("ImportBlock(bytes32,uint256,uint256,uint256,bytes32)")
L2ImportBlockEventSignature common.Hash
// L2AppendMessageEventSignature = keccak256("AppendMessage(uint256,bytes32)")
L2AppendMessageEventSignature common.Hash
)
func init() {
ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
L1ScrollMessengerABI, _ = L1ScrollMessengerMetaData.GetAbi()
L1MessageQueueABI, _ = L1MessageQueueMetaData.GetAbi()
L2GasPriceOracleABI, _ = L2GasPriceOracleMetaData.GetAbi()
L2ScrollMessengerABI, _ = L2ScrollMessengerMetaData.GetAbi()
L1BlockContainerABI, _ = L1BlockContainerMetaData.GetAbi()
L2MessageQueueABI, _ = L2MessageQueueMetaData.GetAbi()
L1GasPriceOracleABI, _ = L1GasPriceOracleMetaData.GetAbi()
L1SentMessageEventSignature = L1ScrollMessengerABI.Events["SentMessage"].ID
L1RelayedMessageEventSignature = L1ScrollMessengerABI.Events["RelayedMessage"].ID
L1FailedRelayedMessageEventSignature = L1ScrollMessengerABI.Events["FailedRelayedMessage"].ID
L1CommitBatchEventSignature = ScrollChainABI.Events["CommitBatch"].ID
L1FinalizeBatchEventSignature = ScrollChainABI.Events["FinalizeBatch"].ID
@@ -59,6 +80,8 @@ func init() {
L2RelayedMessageEventSignature = L2ScrollMessengerABI.Events["RelayedMessage"].ID
L2FailedRelayedMessageEventSignature = L2ScrollMessengerABI.Events["FailedRelayedMessage"].ID
L2ImportBlockEventSignature = L1BlockContainerABI.Events["ImportBlock"].ID
L2AppendMessageEventSignature = L2MessageQueueABI.Events["AppendMessage"].ID
}
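These signature hashes are what an Ethereum node indexes as topic 0 of each emitted log, so they are the natural filter keys for the bridge watchers. A minimal sketch of pulling `SentMessage` logs by topic, assuming the scroll-tech go-ethereum fork keeps upstream's `ethclient`/`FilterQuery` API; the endpoint and messenger address below are placeholders, not real values:

```go
package main

import (
	"context"
	"fmt"
	"log"

	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint; in the bridge this comes from the L1/L2 config.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder contract address for the L1ScrollMessenger.
	messenger := common.HexToAddress("0x0000000000000000000000000000000000000000")

	// Topic 0 of SentMessage logs, i.e. the value held by L1SentMessageEventSignature
	// (the hex constant asserted in the ABI tests below).
	sentMessageTopic := common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e")

	logs, err := client.FilterLogs(context.Background(), ethereum.FilterQuery{
		Addresses: []common.Address{messenger},
		Topics:    [][]common.Hash{{sentMessageTopic}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("SentMessage logs:", len(logs))
}
```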
@@ -11,13 +11,21 @@ import (
func TestEventSignature(t *testing.T) {
assert := assert.New(t)
assert.Equal(L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2c32d4ae151744d0bf0b9464a3e897a1d17ed2f1af71f7c9a75f12ce0d28238f"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("26ba82f907317eedc97d0cbef23de76a43dd6edb563bdb6e9407645b950a7a2d"))
assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e"))
assert.Equal(L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))
assert.Equal(L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
}
@@ -109,3 +117,12 @@ func TestPackSetL2BaseFee(t *testing.T) {
_, err = l2GasOracleABI.Pack("setL2BaseFee", baseFee)
assert.NoError(err)
}
func TestPackImportBlock(t *testing.T) {
assert := assert.New(t)
l1BlockContainerABI := L1BlockContainerABI
_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, []byte{}, false)
assert.NoError(err)
}
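The hex constants asserted in `TestEventSignature` are not arbitrary: each event ID is the Keccak-256 hash of the event's canonical signature string, which is also what `Events[name].ID` returns from the parsed ABI. A small sketch that re-derives them, assuming the fork keeps upstream go-ethereum's `crypto.Keccak256Hash`:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// Re-derive the event IDs checked in TestEventSignature from their canonical signatures.
	signatures := []string{
		"SentMessage(address,address,uint256,uint256,uint256,bytes)",
		"RelayedMessage(bytes32)",
		"FailedRelayedMessage(bytes32)",
		"CommitBatch(uint256,bytes32)",
		"FinalizeBatch(uint256,bytes32,bytes32,bytes32)",
		"QueueTransaction(address,address,uint256,uint64,uint256,bytes)",
		"ImportBlock(bytes32,uint256,uint256,uint256,bytes32)",
		"AppendMessage(uint256,bytes32)",
	}
	for _, sig := range signatures {
		fmt.Printf("%-62s %s\n", sig, crypto.Keccak256Hash([]byte(sig)).Hex())
	}
}
```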
@@ -17,8 +17,8 @@ import (
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/watcher"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher"
)
var app *cli.App
@@ -75,7 +75,7 @@ func action(ctx *cli.Context) error {
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
@@ -89,7 +89,6 @@ func action(ctx *cli.Context) error {
// Start l2 watcher process
go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions
log.Info("Start event-watcher successfully")
// Catch CTRL-C to ensure a graceful shutdown.
7 bridge/cmd/event_watcher/main.go (new file)

@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/event_watcher/app"

func main() {
app.Run()
}
@@ -17,10 +17,10 @@ import (
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
butils "scroll-tech/rollup/internal/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
)
var app *cli.App
@@ -78,7 +78,8 @@ func action(ctx *cli.Context) error {
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
if err != nil {
7 bridge/cmd/gas_oracle/main.go (new file)

@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/gas_oracle/app"

func main() {
app.Run()
}
@@ -11,7 +11,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/internal/config"
)
// MockApp mockApp-test client manager.
@@ -22,29 +22,29 @@ type MockApp struct {
mockApps map[utils.MockAppName]docker.AppAPI
originFile string
rollupFile string
bridgeFile string
args []string
}
// NewRollupApp return a new rollupApp manager, name mush be one them.
func NewRollupApp(base *docker.App, file string) *MockApp {
// NewBridgeApp return a new bridgeApp manager, name mush be one them.
func NewBridgeApp(base *docker.App, file string) *MockApp {
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", base.Timestamp)
rollupApp := &MockApp{
bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
bridgeApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
originFile: file,
rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile},
bridgeFile: bridgeFile,
args: []string{"--log.debug", "--config", bridgeFile},
}
if err := rollupApp.MockConfig(true); err != nil {
if err := bridgeApp.MockConfig(true); err != nil {
panic(err)
}
return rollupApp
return bridgeApp
}
// RunApp run rollup-test child process by multi parameters.
// RunApp run bridge-test child process by multi parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
if !(name == utils.EventWatcherApp ||
name == utils.GasOracleApp ||
@@ -72,16 +72,16 @@ func (b *MockApp) WaitExit() {
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
}
// Free stop and release rollup mocked apps.
// Free stop and release bridge mocked apps.
func (b *MockApp) Free() {
b.WaitExit()
_ = os.Remove(b.rollupFile)
_ = os.Remove(b.bridgeFile)
}
// MockConfig creates a new rollup config.
// MockConfig creates a new bridge config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin rollup config file.
// Load origin bridge config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
@@ -97,10 +97,10 @@ func (b *MockApp) MockConfig(store bool) error {
if !store {
return nil
}
// Store changed rollup config into a temp file.
// Store changed bridge config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.rollupFile, data, 0600)
return os.WriteFile(b.bridgeFile, data, 0600)
}
94 bridge/cmd/msg_relayer/app/app.go (new file)

@@ -0,0 +1,94 @@
package app

import (
"context"
"fmt"
"os"
"os/signal"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
)

var app *cli.App

func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.MessageRelayerApp)
}

func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}

subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()

registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}

// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)

// Finish start all message relayer functions
log.Info("Start message-relayer successfully")

// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)

// Wait until the interrupt signal is received from an OS signal.
<-interrupt

return nil
}

// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
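The relayer above hands its polling to `utils.Loop`; that helper's implementation is not part of this diff, but it presumably just invokes the callback on a fixed period until the context is cancelled. A minimal sketch under that assumption (the callback signature is also assumed, the real scroll-tech/common/utils helper may differ):

```go
package utils

import (
	"context"
	"time"
)

// Loop is a sketch of a periodic runner: call fn every period until ctx is done.
func Loop(ctx context.Context, period time.Duration, fn func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			fn()
		}
	}
}
```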
7 bridge/cmd/msg_relayer/main.go (new file)

@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/msg_relayer/app"

func main() {
app.Run()
}
@@ -17,10 +17,10 @@ import (
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
butils "scroll-tech/rollup/internal/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
)
var app *cli.App
7 bridge/cmd/rollup_relayer/main.go (new file)

@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge/cmd/rollup_relayer/app"

func main() {
app.Run()
}
@@ -2,10 +2,12 @@
"l1_config": {
"confirmations": "0x6",
"endpoint": "DUMMY_ENDPOINT",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"l1_message_queue_address": "0x0000000000000000000000000000000000000000",
"scroll_chain_address": "0x0000000000000000000000000000000000000000",
"start_height": 0,
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
@@ -25,6 +27,7 @@
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
}
},
@@ -35,6 +38,7 @@
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "DUMMY_ENDPOINT",
@@ -53,16 +57,11 @@
"min_gas_price": 0,
"gas_price_diff": 50000
},
"chain_monitor": {
"timeout": 3,
"try_times": 5,
"base_url": "http://localhost:8750"
},
"finalize_batch_interval_sec": 0,
"message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515",
"gas_cost_increase_multiplier": 1.2
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
},
"chunk_proposer_config": {
"max_tx_num_per_chunk": 1123,
@@ -1,14 +1,12 @@
module scroll-tech/rollup
module scroll-tech/bridge
go 1.19
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
@@ -18,20 +16,12 @@ require (
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.9.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
@@ -43,19 +33,13 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
@@ -70,17 +54,12 @@ require (
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
@@ -13,20 +13,13 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
@@ -34,28 +27,11 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
@@ -65,7 +41,6 @@ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXi
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -92,21 +67,14 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -119,19 +87,12 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -156,8 +117,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc h1:eK3NOpjgm/b2TQ6rYqWx92Zri0kBuxf6gKjjsVxWKr8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28 h1:CECBTWhZ5NGAn8lGFB4ooRAYxZns8PXoX8kTR/14c04=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -170,15 +131,6 @@ github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -186,21 +138,14 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -209,9 +154,6 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -221,22 +163,15 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
@@ -251,9 +186,7 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHN
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
@@ -18,7 +18,7 @@ func TestConfig(t *testing.T) {
data, err := json.Marshal(cfg)
assert.NoError(t, err)

tmpJSON := fmt.Sprintf("/tmp/%d_rollup_config.json", time.Now().Nanosecond())
tmpJSON := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
defer func() {
if _, err = os.Stat(tmpJSON); err == nil {
assert.NoError(t, os.Remove(tmpJSON))
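Aside: a minimal, self-contained sketch of the marshal-to-temp-file round trip that TestConfig performs above. The struct, file name, and endpoint value here are illustrative stand-ins, not the repo's types.

package main // illustrative sketch, not part of this PR

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

type demoConfig struct {
	Endpoint string `json:"endpoint"`
}

// roundTrip marshals cfg, writes it to a temp file, reads it back, and removes
// the file again, the same shape as TestConfig above with placeholder names.
func roundTrip(cfg demoConfig) (demoConfig, error) {
	data, err := json.Marshal(cfg)
	if err != nil {
		return demoConfig{}, err
	}
	tmp := filepath.Join(os.TempDir(), "demo_bridge_config.json")
	if err := os.WriteFile(tmp, data, 0o600); err != nil {
		return demoConfig{}, err
	}
	defer os.Remove(tmp)

	var out demoConfig
	raw, err := os.ReadFile(tmp)
	if err != nil {
		return demoConfig{}, err
	}
	if err := json.Unmarshal(raw, &out); err != nil {
		return demoConfig{}, err
	}
	return out, nil
}

func main() {
	out, err := roundTrip(demoConfig{Endpoint: "https://l1.example.org"})
	fmt.Println(out.Endpoint, err)
}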
@@ -13,6 +13,8 @@ type L1Config struct {
Endpoint string `json:"endpoint"`
// The start height to sync event from layer 1
StartHeight uint64 `json:"start_height"`
// The L1ScrollMessenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address"`
// The L1MessageQueue contract address deployed on layer 1 chain.
L1MessageQueueAddress common.Address `json:"l1_message_queue_address"`
// The ScrollChain contract address deployed on layer 1 chain.
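For context, a hedged sketch of how the fields visible in the hunk above could be populated in Go. Only the fields shown in this hunk are set, and the endpoint and addresses below are placeholders rather than values from this PR.

package main // illustrative sketch, not part of this PR

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// l1Demo mirrors only the L1Config fields visible in the hunk above; the
// values used in main are placeholders chosen for illustration.
type l1Demo struct {
	Endpoint              string         `json:"endpoint"`
	StartHeight           uint64         `json:"start_height"`
	L1MessengerAddress    common.Address `json:"l1_messenger_address"`
	L1MessageQueueAddress common.Address `json:"l1_message_queue_address"`
}

func main() {
	cfg := l1Demo{
		Endpoint:              "https://l1.example.org",
		StartHeight:           100,
		L1MessengerAddress:    common.HexToAddress("0x0000000000000000000000000000000000000001"),
		L1MessageQueueAddress: common.HexToAddress("0x0000000000000000000000000000000000000002"),
	}
	fmt.Println(cfg.L1MessengerAddress.Hex(), cfg.L1MessageQueueAddress.Hex())
}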
@@ -37,18 +37,13 @@ type SenderConfig struct {
PendingLimit int `json:"pending_limit"`
}

// ChainMonitor this config is used to get batch status from chain_monitor API.
type ChainMonitor struct {
TimeOut int `json:"timeout"`
TryTimes int `json:"try_times"`
BaseURL string `json:"base_url"`
}

// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
type RelayerConfig struct {
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// MessengerContractAddress store the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// GasPriceOracleContractAddress store the scroll messenger contract address.
GasPriceOracleContractAddress common.Address `json:"gas_price_oracle_contract_address"`
// sender config
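The ChainMonitor block above (timeout, try_times, base_url) feeds the resty client that the L2 relayer builds later in this diff. A minimal hedged sketch of that wiring follows; the endpoint path matches getBatchStatusByIndex below, while the base URL and the numeric settings are illustrative.

package main // illustrative sketch, not part of this PR

import (
	"fmt"
	"time"

	"github.com/go-resty/resty/v2"
)

// newChainMonitorClient mirrors how this diff configures the chain_monitor
// client: retry count taken from try_times, timeout given in seconds.
func newChainMonitorClient(tryTimes, timeoutSec int) *resty.Client {
	client := resty.New()
	client.SetRetryCount(tryTimes)
	client.SetTimeout(time.Duration(timeoutSec) * time.Second)
	return client
}

func main() {
	client := newChainMonitorClient(3, 5)
	// the same endpoint shape used by getBatchStatusByIndex later in this diff
	url := fmt.Sprintf("%s/v1/batch_status?batch_index=%d", "http://localhost:8080", 1)
	if _, err := client.R().Get(url); err != nil {
		fmt.Println("chain_monitor request failed:", err)
	}
}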
@@ -59,11 +54,8 @@ type RelayerConfig struct {
|
||||
FinalizeBatchIntervalSec uint64 `json:"finalize_batch_interval_sec"`
|
||||
// MessageRelayMinGasLimit to avoid OutOfGas error
|
||||
MessageRelayMinGasLimit uint64 `json:"message_relay_min_gas_limit,omitempty"`
|
||||
// ChainMonitor config of monitoring service
|
||||
ChainMonitor *ChainMonitor `json:"chain_monitor,omitempty"`
|
||||
// GasCostIncreaseMultiplier multiplier for min gas limit estimation
|
||||
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier,omitempty"`
|
||||
// The private key of the relayer
|
||||
MessageSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
|
||||
GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
|
||||
CommitSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
|
||||
FinalizeSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
|
||||
@@ -103,6 +95,7 @@ func convertAndCheck(key string, uniqueAddressesSet map[string]struct{}) (*ecdsa
|
||||
func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
|
||||
var privateKeysConfig struct {
|
||||
relayerConfigAlias
|
||||
MessageSenderPrivateKey string `json:"message_sender_private_key"`
|
||||
GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
|
||||
CommitSenderPrivateKey string `json:"commit_sender_private_key"`
|
||||
FinalizeSenderPrivateKey string `json:"finalize_sender_private_key"`
|
||||
@@ -116,6 +109,11 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
|
||||
|
||||
uniqueAddressesSet := make(map[string]struct{})
|
||||
|
||||
r.MessageSenderPrivateKey, err = convertAndCheck(privateKeysConfig.MessageSenderPrivateKey, uniqueAddressesSet)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error converting and checking message sender private key: %w", err)
|
||||
}
|
||||
|
||||
r.GasOracleSenderPrivateKey, err = convertAndCheck(privateKeysConfig.GasOracleSenderPrivateKey, uniqueAddressesSet)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error converting and checking gas oracle sender private key: %w", err)
|
||||
@@ -139,12 +137,14 @@ func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
|
||||
privateKeysConfig := struct {
|
||||
relayerConfigAlias
|
||||
// The private key of the relayer
|
||||
MessageSenderPrivateKey string `json:"message_sender_private_key"`
|
||||
GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
|
||||
CommitSenderPrivateKey string `json:"commit_sender_private_key"`
|
||||
FinalizeSenderPrivateKey string `json:"finalize_sender_private_key"`
|
||||
}{}
|
||||
|
||||
privateKeysConfig.relayerConfigAlias = relayerConfigAlias(*r)
|
||||
privateKeysConfig.MessageSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.MessageSenderPrivateKey))
|
||||
privateKeysConfig.GasOracleSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.GasOracleSenderPrivateKey))
|
||||
privateKeysConfig.CommitSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.CommitSenderPrivateKey))
|
||||
privateKeysConfig.FinalizeSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.FinalizeSenderPrivateKey))
|
||||
@@ -6,6 +6,10 @@ const (
|
||||
gasPriceDiffPrecision = 1000000
|
||||
|
||||
defaultGasPriceDiff = 50000 // 5%
|
||||
|
||||
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
|
||||
|
||||
defaultL2MessageRelayMinGasLimit = 200000
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -8,16 +8,17 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/sender"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/controller/sender"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// Layer1Relayer is responsible for
|
||||
@@ -31,19 +32,32 @@ type Layer1Relayer struct {
|
||||
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
// channel used to communicate with transaction sender
|
||||
messageSender *sender.Sender
|
||||
l2MessengerABI *abi.ABI
|
||||
|
||||
gasOracleSender *sender.Sender
|
||||
l1GasOracleABI *abi.ABI
|
||||
|
||||
minGasLimitForMessageRelay uint64
|
||||
|
||||
lastGasPrice uint64
|
||||
minGasPrice uint64
|
||||
gasPriceDiff uint64
|
||||
|
||||
l1BlockOrm *orm.L1Block
|
||||
metrics *l1RelayerMetrics
|
||||
l1MessageOrm *orm.L1Message
|
||||
l1BlockOrm *orm.L1Block
|
||||
metrics *l1RelayerMetrics
|
||||
}
|
||||
|
||||
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
|
||||
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l1_relayer", "message_sender", reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new message sender failed for address %s, err: %v", addr.Hex(), err)
|
||||
}
|
||||
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
|
||||
@@ -60,14 +74,25 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
|
||||
gasPriceDiff = defaultGasPriceDiff
|
||||
}
|
||||
|
||||
minGasLimitForMessageRelay := uint64(defaultL1MessageRelayMinGasLimit)
|
||||
if cfg.MessageRelayMinGasLimit != 0 {
|
||||
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
|
||||
}
|
||||
|
||||
l1Relayer := &Layer1Relayer{
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
l1BlockOrm: orm.NewL1Block(db),
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
l1MessageOrm: orm.NewL1Message(db),
|
||||
l1BlockOrm: orm.NewL1Block(db),
|
||||
|
||||
messageSender: messageSender,
|
||||
l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
|
||||
|
||||
gasOracleSender: gasOracleSender,
|
||||
l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,
|
||||
|
||||
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
|
||||
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
}
|
||||
@@ -78,9 +103,57 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
|
||||
return l1Relayer, nil
|
||||
}
|
||||
|
||||
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
|
||||
func (r *Layer1Relayer) ProcessSavedEvents() {
|
||||
// msgs are sorted by nonce in increasing order
|
||||
msgs, err := r.l1MessageOrm.GetL1MessagesByStatus(types.MsgPending, 100)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(msgs) > 0 {
|
||||
log.Info("Processing L1 messages", "count", len(msgs))
|
||||
}
|
||||
|
||||
for _, msg := range msgs {
|
||||
tmpMsg := msg
|
||||
r.metrics.bridgeL1RelayedMsgsTotal.Inc()
|
||||
if err = r.processSavedEvent(&tmpMsg); err != nil {
|
||||
r.metrics.bridgeL1RelayedMsgsFailureTotal.Inc()
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
|
||||
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
calldata := common.Hex2Bytes(msg.Calldata)
|
||||
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay)
|
||||
if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
|
||||
return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
|
||||
}
|
||||
|
||||
if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
|
||||
return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
|
||||
|
||||
err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
|
||||
if err != nil {
|
||||
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ProcessGasPriceOracle imports gas price to layer2
|
||||
func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
r.metrics.rollupL1RelayerGasPriceOraclerRunTotal.Inc()
|
||||
r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
|
||||
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
|
||||
if err != nil {
|
||||
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
|
||||
@@ -125,7 +198,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
return
|
||||
}
|
||||
r.lastGasPrice = block.BaseFee
|
||||
r.metrics.rollupL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
|
||||
}
|
||||
}
|
||||
@@ -136,8 +209,24 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case cfm := <-r.messageSender.ConfirmChan():
|
||||
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
|
||||
}
|
||||
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
|
||||
} else {
|
||||
// @todo handle db error
|
||||
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
|
||||
}
|
||||
log.Info("transaction confirmed in layer2", "confirmation", cfm)
|
||||
}
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.metrics.rollupL1GasOraclerConfirmedTotal.Inc()
|
||||
r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
54 bridge/internal/controller/relayer/l1_relayer_metrics.go Normal file
@@ -0,0 +1,54 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type l1RelayerMetrics struct {
|
||||
bridgeL1RelayedMsgsTotal prometheus.Counter
|
||||
bridgeL1RelayedMsgsFailureTotal prometheus.Counter
|
||||
bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
|
||||
bridgeL1RelayerLastGasPrice prometheus.Gauge
|
||||
bridgeL1MsgsRelayedConfirmedTotal prometheus.Counter
|
||||
bridgeL1GasOraclerConfirmedTotal prometheus.Counter
|
||||
}
|
||||
|
||||
var (
|
||||
initL1RelayerMetricOnce sync.Once
|
||||
l1RelayerMetric *l1RelayerMetrics
|
||||
)
|
||||
|
||||
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
|
||||
initL1RelayerMetricOnce.Do(func() {
|
||||
l1RelayerMetric = &l1RelayerMetrics{
|
||||
bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_msg_relayed_total",
|
||||
Help: "The total number of the l1 relayed message.",
|
||||
}),
|
||||
bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_msg_relayed_failure_total",
|
||||
Help: "The total number of the l1 relayed failure message.",
|
||||
}),
|
||||
bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_relayed_confirmed_total",
|
||||
Help: "The total number of layer1 relayed confirmed",
|
||||
}),
|
||||
bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_gas_price_oracler_total",
|
||||
Help: "The total number of layer1 gas price oracler run total",
|
||||
}),
|
||||
bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_layer1_gas_price_latest_gas_price",
|
||||
Help: "The latest gas price of bridge relayer l1",
|
||||
}),
|
||||
bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_gas_oracler_confirmed_total",
|
||||
Help: "The total number of layer1 relayed confirmed",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return l1RelayerMetric
|
||||
}
|
||||
@@ -18,8 +18,35 @@ import (
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/rollup/internal/controller/sender"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/bridge/internal/controller/sender"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
var (
|
||||
templateL1Message = []*orm.L1Message{
|
||||
{
|
||||
QueueIndex: 1,
|
||||
MsgHash: "msg_hash1",
|
||||
Height: 1,
|
||||
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
|
||||
Value: "0x19ece",
|
||||
GasLimit: 11529940,
|
||||
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
|
||||
Calldata: "testdata",
|
||||
Layer1Hash: "hash0",
|
||||
},
|
||||
{
|
||||
QueueIndex: 2,
|
||||
MsgHash: "msg_hash2",
|
||||
Height: 2,
|
||||
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
|
||||
Value: "0x19ece",
|
||||
GasLimit: 11529940,
|
||||
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
|
||||
Calldata: "testdata",
|
||||
Layer1Hash: "hash1",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func setupL1RelayerDB(t *testing.T) *gorm.DB {
|
||||
@@ -40,6 +67,61 @@ func testCreateNewL1Relayer(t *testing.T) {
|
||||
assert.NotNil(t, relayer)
|
||||
}
|
||||
|
||||
func testL1RelayerProcessSaveEvents(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
l1Cfg := cfg.L1Config
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
|
||||
relayer.ProcessSavedEvents()
|
||||
msg1, err := l1MessageOrm.GetL1MessageByQueueIndex(1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.MsgStatus(msg1.Status), types.MsgSubmitted)
|
||||
msg2, err := l1MessageOrm.GetL1MessageByQueueIndex(2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.MsgStatus(msg2.Status), types.MsgSubmitted)
|
||||
}
|
||||
|
||||
func testL1RelayerMsgConfirm(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
l1Messages := []*orm.L1Message{
|
||||
{MsgHash: "msg-1", QueueIndex: 0},
|
||||
{MsgHash: "msg-2", QueueIndex: 1},
|
||||
}
|
||||
err := l1MessageOrm.SaveL1Messages(context.Background(), l1Messages)
|
||||
assert.NoError(t, err)
|
||||
// Create and set up the Layer1 Relayer.
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "msg-1",
|
||||
IsSuccessful: true,
|
||||
})
|
||||
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "msg-2",
|
||||
IsSuccessful: false,
|
||||
})
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
msg1, err1 := l1MessageOrm.GetL1MessageByMsgHash("msg-1")
|
||||
msg2, err2 := l1MessageOrm.GetL1MessageByMsgHash("msg-2")
|
||||
return err1 == nil && types.MsgStatus(msg1.Status) == types.MsgConfirmed &&
|
||||
err2 == nil && types.MsgStatus(msg2.Status) == types.MsgRelayFailed
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-resty/resty/v2"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
@@ -20,10 +19,10 @@ import (
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/sender"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/controller/sender"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// Layer2Relayer is responsible for
|
||||
@@ -44,6 +43,9 @@ type Layer2Relayer struct {
|
||||
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
messageSender *sender.Sender
|
||||
l1MessengerABI *abi.ABI
|
||||
|
||||
commitSender *sender.Sender
|
||||
finalizeSender *sender.Sender
|
||||
l1RollupABI *abi.ABI
|
||||
@@ -51,13 +53,12 @@ type Layer2Relayer struct {
|
||||
gasOracleSender *sender.Sender
|
||||
l2GasOracleABI *abi.ABI
|
||||
|
||||
minGasLimitForMessageRelay uint64
|
||||
|
||||
lastGasPrice uint64
|
||||
minGasPrice uint64
|
||||
gasPriceDiff uint64
|
||||
|
||||
// Used to get batch status from chain_monitor api.
|
||||
chainMonitorClient *resty.Client
|
||||
|
||||
// A list of processing message.
|
||||
// key(string): confirmation ID, value(string): layer2 hash.
|
||||
processingMessage sync.Map
|
||||
@@ -75,6 +76,12 @@ type Layer2Relayer struct {
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l2_relayer", "message_sender", reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new message sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
|
||||
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
|
||||
@@ -102,10 +109,10 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
gasPriceDiff = defaultGasPriceDiff
|
||||
}
|
||||
|
||||
// chain_monitor client
|
||||
chainMonitorClient := resty.New()
|
||||
chainMonitorClient.SetRetryCount(cfg.ChainMonitor.TryTimes)
|
||||
chainMonitorClient.SetTimeout(time.Duration(cfg.ChainMonitor.TimeOut) * time.Second)
|
||||
minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
|
||||
if cfg.MessageRelayMinGasLimit != 0 {
|
||||
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
|
||||
}
|
||||
|
||||
layer2Relayer := &Layer2Relayer{
|
||||
ctx: ctx,
|
||||
@@ -117,6 +124,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
|
||||
l2Client: l2Client,
|
||||
|
||||
messageSender: messageSender,
|
||||
l1MessengerABI: bridgeAbi.L1ScrollMessengerABI,
|
||||
|
||||
commitSender: commitSender,
|
||||
finalizeSender: finalizeSender,
|
||||
l1RollupABI: bridgeAbi.ScrollChainABI,
|
||||
@@ -124,6 +134,8 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
gasOracleSender: gasOracleSender,
|
||||
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
|
||||
|
||||
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
|
||||
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
|
||||
@@ -131,7 +143,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
processingMessage: sync.Map{},
|
||||
processingCommitment: sync.Map{},
|
||||
processingFinalization: sync.Map{},
|
||||
chainMonitorClient: chainMonitorClient,
|
||||
}
|
||||
|
||||
// Initialize genesis before we do anything else
|
||||
@@ -264,7 +275,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
|
||||
// ProcessGasPriceOracle imports gas price to layer1
|
||||
func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
|
||||
r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
|
||||
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
|
||||
if batch == nil || err != nil {
|
||||
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
|
||||
@@ -302,7 +313,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
return
|
||||
}
|
||||
r.lastGasPrice = suggestGasPriceUint64
|
||||
r.metrics.rollupL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
|
||||
}
|
||||
}
|
||||
@@ -311,13 +322,13 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
|
||||
func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
// get pending batches from database in ascending order by their index.
|
||||
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 5)
|
||||
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 1)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
for _, batch := range pendingBatches {
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
|
||||
r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
|
||||
// get current header and parent header.
|
||||
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
|
||||
if err != nil {
|
||||
@@ -374,8 +385,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
|
||||
// send transaction
|
||||
txID := batch.Hash + "-commit"
|
||||
minGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.GasCostIncreaseMultiplier)
|
||||
txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, minGasLimit)
|
||||
txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"Failed to send commitBatch tx to layer1",
|
||||
@@ -400,7 +410,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
|
||||
r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
|
||||
r.processingCommitment.Store(txID, batch.Hash)
|
||||
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
|
||||
}
|
||||
@@ -424,7 +434,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
return
|
||||
}
|
||||
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesTotal.Inc()
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()
|
||||
|
||||
batch := batches[0]
|
||||
hash := batch.Hash
|
||||
@@ -435,20 +445,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
return
|
||||
case types.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "hash", hash)
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
|
||||
|
||||
// Check batch status before send `finalizeBatchWithProof` tx.
|
||||
//batchStatus, err := r.getBatchStatusByIndex(batch.Index)
|
||||
//if err != nil {
|
||||
// r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
|
||||
// log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
|
||||
// return
|
||||
//}
|
||||
//if !batchStatus {
|
||||
// r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
|
||||
// log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
|
||||
// return
|
||||
//}
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
|
||||
|
||||
var parentBatchStateRoot string
|
||||
if batch.Index > 0 {
|
||||
@@ -503,6 +500,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
)
|
||||
|
||||
log.Debug(
|
||||
"finalizeBatchWithProof in layer1 failed",
|
||||
"index", batch.Index,
|
||||
@@ -524,7 +522,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
"tx hash", finalizeTxHash.String(), "err", err)
|
||||
}
|
||||
r.processingFinalization.Store(txID, hash)
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
|
||||
|
||||
case types.ProvingTaskFailed:
|
||||
// We were unable to prove this batch. There are two possibilities:
|
||||
@@ -550,29 +548,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
}
|
||||
}
|
||||
|
||||
// batchStatusResponse the response schema
|
||||
type batchStatusResponse struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data bool `json:"data"`
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) getBatchStatusByIndex(batchIndex uint64) (bool, error) {
|
||||
var response batchStatusResponse
|
||||
resp, err := r.chainMonitorClient.R().SetResult(&response).Get(fmt.Sprintf("%s/v1/batch_status?batch_index=%d", r.cfg.ChainMonitor.BaseURL, batchIndex))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if resp.IsError() {
|
||||
return false, resp.Error().(error)
|
||||
}
|
||||
if response.ErrCode != 0 {
|
||||
return false, fmt.Errorf("failed to get batch status, errCode: %d, errMsg: %s", response.ErrCode, response.ErrMsg)
|
||||
}
|
||||
|
||||
return response.Data, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
transactionType := "Unknown"
|
||||
// check whether it is CommitBatches transaction
|
||||
@@ -592,7 +567,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
"batch hash", batchHash.(string),
|
||||
"tx hash", confirmation.TxHash.String(), "err", err)
|
||||
}
|
||||
r.metrics.rollupL2BatchesCommittedConfirmedTotal.Inc()
|
||||
r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
|
||||
r.processingCommitment.Delete(confirmation.ID)
|
||||
}
|
||||
|
||||
@@ -614,7 +589,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
"batch hash", batchHash.(string),
|
||||
"tx hash", confirmation.TxHash.String(), "err", err)
|
||||
}
|
||||
r.metrics.rollupL2BatchesFinalizedConfirmedTotal.Inc()
|
||||
r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
|
||||
r.processingFinalization.Delete(confirmation.ID)
|
||||
}
|
||||
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
|
||||
@@ -625,12 +600,14 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case confirmation := <-r.messageSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.commitSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.finalizeSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.metrics.rollupL2BatchesGasOraclerConfirmedTotal.Inc()
|
||||
r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
74 bridge/internal/controller/relayer/l2_relayer_metrics.go Normal file
@@ -0,0 +1,74 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type l2RelayerMetrics struct {
|
||||
bridgeL2RelayerProcessPendingBatchTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
|
||||
bridgeL2RelayerGasPriceOraclerRunTotal prometheus.Counter
|
||||
bridgeL2RelayerLastGasPrice prometheus.Gauge
|
||||
bridgeL2RelayerProcessCommittedBatchesTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
|
||||
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
|
||||
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
|
||||
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
|
||||
}
|
||||
|
||||
var (
|
||||
initL2RelayerMetricOnce sync.Once
|
||||
l2RelayerMetric *l2RelayerMetrics
|
||||
)
|
||||
|
||||
func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
|
||||
initL2RelayerMetricOnce.Do(func() {
|
||||
l2RelayerMetric = &l2RelayerMetrics{
|
||||
bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_pending_batch_total",
|
||||
Help: "The total number of layer2 process pending batch",
|
||||
}),
|
||||
bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_pending_batch_success_total",
|
||||
Help: "The total number of layer2 process pending success batch",
|
||||
}),
|
||||
bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_gas_price_oracler_total",
|
||||
Help: "The total number of layer2 gas price oracler run total",
|
||||
}),
|
||||
bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_layer2_gas_price_latest_gas_price",
|
||||
Help: "The latest gas price of bridge relayer l2",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_total",
|
||||
Help: "The total number of layer2 process committed batches run total",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_finalized_total",
|
||||
Help: "The total number of layer2 process committed batches finalized total",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_finalized_success_total",
|
||||
Help: "The total number of layer2 process committed batches finalized success total",
|
||||
}),
|
||||
bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_confirmed_total",
|
||||
Help: "The total number of layer2 process committed batches confirmed total",
|
||||
}),
|
||||
bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_finalized_batches_confirmed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed total",
|
||||
}),
|
||||
bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed total",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return l2RelayerMetric
|
||||
}
|
||||
@@ -4,12 +4,9 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -22,8 +19,8 @@ import (
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/rollup/internal/controller/sender"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/bridge/internal/controller/sender"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
func setupL2RelayerDB(t *testing.T) *gorm.DB {
|
||||
@@ -370,33 +367,3 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
})
|
||||
relayer.ProcessGasPriceOracle()
|
||||
}
|
||||
|
||||
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
|
||||
router := gin.New()
|
||||
r := router.Group("/v1")
|
||||
r.GET("/batch_status", func(ctx *gin.Context) {
|
||||
ctx.JSON(http.StatusOK, struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data bool `json:"data"`
|
||||
}{
|
||||
ErrCode: 0,
|
||||
ErrMsg: "",
|
||||
Data: true,
|
||||
})
|
||||
})
|
||||
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
|
||||
}
|
||||
|
||||
func testGetBatchStatusByIndex(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
|
||||
status, err := relayer.getBatchStatusByIndex(1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, status)
|
||||
}
|
||||
@@ -13,7 +13,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"

"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/internal/config"
)

var (
@@ -86,12 +86,10 @@ func TestMain(m *testing.M) {

func TestFunctions(t *testing.T) {
setupEnv(t)
srv, err := mockChainMonitorServer(cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL)
assert.NoError(t, err)
defer srv.Close()

// Run l1 relayer test cases.
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents)
t.Run("TestL1RelayerMsgConfirm", testL1RelayerMsgConfirm)
t.Run("TestL1RelayerGasOracleConfirm", testL1RelayerGasOracleConfirm)
t.Run("TestL1RelayerProcessGasPriceOracle", testL1RelayerProcessGasPriceOracle)

@@ -103,6 +101,4 @@ func TestFunctions(t *testing.T) {
t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
// test getBatchStatusByIndex
t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)
}
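The confirm tests in this diff poll the database with utils.TryTimes until the expected message status appears. Below is a hedged re-implementation of that polling shape; only the signature matches the call sites in this diff, and the 500ms retry interval is an assumption.

package main // illustrative sketch, not part of this PR

import (
	"fmt"
	"time"
)

// tryTimes retries fn up to n times, pausing between attempts, and reports
// whether fn ever returned true. It matches how utils.TryTimes is called in
// testL1RelayerMsgConfirm; the interval is not taken from the repo.
func tryTimes(n int, fn func() bool) bool {
	for i := 0; i < n; i++ {
		if fn() {
			return true
		}
		time.Sleep(500 * time.Millisecond)
	}
	return false
}

func main() {
	attempts := 0
	ok := tryTimes(5, func() bool {
		attempts++
		return attempts >= 3
	})
	fmt.Println("confirmed:", ok, "after", attempts, "attempts")
}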
@@ -16,6 +16,7 @@ func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Add
|
||||
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gasLimit, err := s.estimateGasLimit(auth, contract, input, gasPrice, nil, nil, value, minGasLimit)
|
||||
if err != nil {
|
||||
log.Error("estimateLegacyGas estimateGasLimit failure", "gasPrice", gasPrice, "error", err)
|
||||
@@ -45,10 +46,7 @@ func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Ad
|
||||
gasLimit, err := s.estimateGasLimit(auth, contract, input, nil, gasTipCap, gasFeeCap, value, minGasLimit)
|
||||
if err != nil {
|
||||
log.Error("estimateDynamicGas estimateGasLimit failure", "error", err)
|
||||
if minGasLimit == 0 {
|
||||
return nil, err
|
||||
}
|
||||
gasLimit = minGasLimit
|
||||
return nil, err
|
||||
}
|
||||
return &FeeData{
|
||||
gasLimit: gasLimit,
|
||||
@@ -69,7 +67,7 @@ func (s *Sender) estimateGasLimit(opts *bind.TransactOpts, contract *common.Addr
|
||||
}
|
||||
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
|
||||
if err != nil {
|
||||
log.Error("estimateGasLimit EstimateGas failure", "error", err)
|
||||
log.Error("estimateGasLimit EstimateGas failure", "msg", msg, "error", err)
|
||||
return 0, err
|
||||
}
|
||||
if minGasLimit > gasLimit {
|
||||
@@ -33,59 +33,59 @@ func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
|
||||
initSenderMetricOnce.Do(func() {
|
||||
sm = &senderMetrics{
|
||||
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_total",
|
||||
Name: "bridge_sender_send_transaction_total",
|
||||
Help: "The total number of sending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_send_transaction_full_tx_failure_total",
|
||||
Name: "bridge_sender_send_transaction_full_tx_failure_total",
|
||||
Help: "The total number of sending transaction failure for full size tx.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_repeat_transaction_failure_total",
|
||||
Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
|
||||
Help: "The total number of sending transaction failure for repeat transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_get_fee_failure_total",
|
||||
Name: "bridge_sender_send_transaction_get_fee_failure_total",
|
||||
Help: "The total number of sending transaction failure for getting fee.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_send_tx_failure_total",
|
||||
Name: "bridge_sender_send_transaction_send_tx_failure_total",
|
||||
Help: "The total number of sending transaction failure for sending tx.",
|
||||
}, []string{"service", "name"}),
|
||||
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_resubmit_send_transaction_total",
|
||||
Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
|
||||
Help: "The total number of resubmit transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_pending_tx_count",
|
||||
Name: "bridge_sender_pending_tx_count",
|
||||
Help: "The pending tx count in the sender.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_fee_cap",
|
||||
Name: "bridge_sender_gas_fee_cap",
|
||||
Help: "The gas fee of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_tip_cap",
|
||||
Name: "bridge_sender_gas_tip_cap",
|
||||
Help: "The gas tip of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_price_cap",
|
||||
Name: "bridge_sender_gas_price_cap",
|
||||
Help: "The gas price of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_limit",
|
||||
Name: "bridge_sender_gas_limit",
|
||||
Help: "The gas limit of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_nonce",
|
||||
Name: "bridge_sender_nonce",
|
||||
Help: "The nonce of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_check_pending_transaction_total",
|
||||
Name: "bridge_sender_check_pending_transaction_total",
|
||||
Help: "The total number of check pending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_check_balancer_total",
|
||||
Name: "bridge_sender_check_balancer_total",
|
||||
Help: "The total number of check balancer.",
|
||||
}, []string{"service", "name"}),
|
||||
}
|
||||
@@ -18,8 +18,8 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -56,6 +56,15 @@ type FeeData struct {
|
||||
gasLimit uint64
|
||||
}
|
||||
|
||||
func newEmptyFeeData() *FeeData {
|
||||
return &FeeData{
|
||||
gasFeeCap: big.NewInt(0),
|
||||
gasTipCap: big.NewInt(0),
|
||||
gasPrice: big.NewInt(0),
|
||||
gasLimit: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// PendingTransaction submitted but pending transactions
|
||||
type PendingTransaction struct {
|
||||
submitAt uint64
|
||||
@@ -85,6 +94,8 @@ type Sender struct {
|
||||
stopCh chan struct{}
|
||||
|
||||
metrics *senderMetrics
|
||||
|
||||
cachedMaxFeeData *FeeData // hacky way to avoid getFeeData error
|
||||
}
|
||||
|
||||
// NewSender returns a new instance of transaction sender
|
||||
@@ -129,19 +140,20 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
|
||||
}
|
||||
|
||||
sender := &Sender{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
client: client,
|
||||
chainID: chainID,
|
||||
auth: auth,
|
||||
minBalance: config.MinBalance,
|
||||
confirmCh: make(chan *Confirmation, 128),
|
||||
blockNumber: header.Number.Uint64(),
|
||||
baseFeePerGas: baseFeePerGas,
|
||||
pendingTxs: cmapV2.New[*PendingTransaction](),
|
||||
stopCh: make(chan struct{}),
|
||||
name: name,
|
||||
service: service,
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
client: client,
|
||||
chainID: chainID,
|
||||
auth: auth,
|
||||
minBalance: config.MinBalance,
|
||||
confirmCh: make(chan *Confirmation, 128),
|
||||
blockNumber: header.Number.Uint64(),
|
||||
baseFeePerGas: baseFeePerGas,
|
||||
pendingTxs: cmapV2.New[*PendingTransaction](),
|
||||
stopCh: make(chan struct{}),
|
||||
name: name,
|
||||
service: service,
|
||||
cachedMaxFeeData: newEmptyFeeData(),
|
||||
}
|
||||
sender.metrics = initSenderMetrics(reg)
|
||||
|
||||
@@ -189,6 +201,26 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
|
||||
return s.estimateLegacyGas(auth, target, value, data, minGasLimit)
|
||||
}
|
||||
|
||||
func (s *Sender) cacheMaxFeeData(feeData *FeeData) {
|
||||
if feeData == nil {
|
||||
log.Error("cacheMaxFeeData", "err", "feeData must not be nil")
|
||||
return
|
||||
}
|
||||
|
||||
if feeData.gasFeeCap != nil && feeData.gasFeeCap.Cmp(s.cachedMaxFeeData.gasFeeCap) > 0 {
|
||||
s.cachedMaxFeeData.gasFeeCap = feeData.gasFeeCap
|
||||
}
|
||||
if feeData.gasTipCap != nil && feeData.gasTipCap.Cmp(s.cachedMaxFeeData.gasTipCap) > 0 {
|
||||
s.cachedMaxFeeData.gasTipCap = feeData.gasTipCap
|
||||
}
|
||||
if feeData.gasPrice != nil && feeData.gasPrice.Cmp(s.cachedMaxFeeData.gasPrice) > 0 {
|
||||
s.cachedMaxFeeData.gasPrice = feeData.gasPrice
|
||||
}
|
||||
if feeData.gasLimit > s.cachedMaxFeeData.gasLimit {
|
||||
s.cachedMaxFeeData.gasLimit = feeData.gasLimit
|
||||
}
|
||||
}
|
||||
|
||||
// SendTransaction send a signed L2tL1 transaction.
|
||||
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
|
||||
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
@@ -218,7 +250,12 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
|
||||
if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
|
||||
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
|
||||
log.Error("failed to get fee data", "err", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
|
||||
if s.cachedMaxFeeData.gasLimit == 0 { // if no MaxFeeData cached, and getFeeData fails
|
||||
return common.Hash{}, fmt.Errorf("failed to get fee data for the first time, err: %w", err)
|
||||
}
|
||||
feeData = s.cachedMaxFeeData
|
||||
} else {
|
||||
s.cacheMaxFeeData(feeData)
|
||||
}
|
||||
|
||||
if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
|
||||
@@ -411,6 +448,7 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
}
|
||||
|
||||
log.Debug("Transaction gas adjustment details", txInfo)
|
||||
s.cacheMaxFeeData(feeData)
|
||||
|
||||
nonce := tx.Nonce()
|
||||
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
@@ -19,7 +19,7 @@ import (

"scroll-tech/common/docker"

"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/internal/config"
)

const TXBatch = 50
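Earlier in this diff the sender starts caching the maximum fee data it has seen (cachedMaxFeeData) and falls back to it when getFeeData fails. The following is a compact hedged sketch of that keep-the-max-and-fall-back idea; field names follow the diff, but the code is standalone and pickFeeData/estimate are stand-ins for SendTransaction/getFeeData.

package main // illustrative sketch, not part of this PR

import (
	"errors"
	"fmt"
	"math/big"
)

type feeData struct {
	gasFeeCap, gasTipCap, gasPrice *big.Int
	gasLimit                       uint64
}

// newEmptyFeeData matches the zero-valued starting point the diff uses, so the
// comparisons below never see a nil big.Int on the cached side.
func newEmptyFeeData() *feeData {
	return &feeData{gasFeeCap: big.NewInt(0), gasTipCap: big.NewInt(0), gasPrice: big.NewInt(0)}
}

// maxBig returns the larger of a and b, treating a nil b as "no update".
func maxBig(a, b *big.Int) *big.Int {
	if b != nil && b.Cmp(a) > 0 {
		return b
	}
	return a
}

// cacheMax keeps the maximum of each fee component observed so far, mirroring
// cacheMaxFeeData in this diff.
func cacheMax(cached, latest *feeData) {
	cached.gasFeeCap = maxBig(cached.gasFeeCap, latest.gasFeeCap)
	cached.gasTipCap = maxBig(cached.gasTipCap, latest.gasTipCap)
	cached.gasPrice = maxBig(cached.gasPrice, latest.gasPrice)
	if latest.gasLimit > cached.gasLimit {
		cached.gasLimit = latest.gasLimit
	}
}

// pickFeeData falls back to the cached maximum when estimation fails, the same
// decision SendTransaction makes in this diff; estimate stands in for getFeeData.
func pickFeeData(cached *feeData, estimate func() (*feeData, error)) (*feeData, error) {
	latest, err := estimate()
	if err != nil {
		if cached.gasLimit == 0 {
			return nil, errors.New("no cached fee data to fall back to")
		}
		return cached, nil
	}
	cacheMax(cached, latest)
	return latest, nil
}

func main() {
	cached := newEmptyFeeData()
	fd, err := pickFeeData(cached, func() (*feeData, error) {
		return &feeData{gasFeeCap: big.NewInt(2), gasTipCap: big.NewInt(1), gasPrice: big.NewInt(2), gasLimit: 21000}, nil
	})
	fmt.Println(fd.gasLimit, err)
}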
@@ -12,8 +12,8 @@ import (
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// BatchProposer proposes batches based on available unbatched chunks.
|
||||
@@ -63,39 +63,39 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
|
||||
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_batch_circle_total",
|
||||
Name: "bridge_propose_batch_circle_total",
|
||||
Help: "Total number of propose batch total.",
|
||||
}),
|
||||
proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_batch_failure_circle_total",
|
||||
Name: "bridge_propose_batch_failure_circle_total",
|
||||
Help: "Total number of propose batch total.",
|
||||
}),
|
||||
proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_batch_update_info_total",
|
||||
Name: "bridge_propose_batch_update_info_total",
|
||||
Help: "Total number of propose batch update info total.",
|
||||
}),
|
||||
proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_batch_update_info_failure_total",
|
||||
Name: "bridge_propose_batch_update_info_failure_total",
|
||||
Help: "Total number of propose batch update info failure total.",
|
||||
}),
|
||||
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_total_l1_commit_gas",
|
||||
Name: "bridge_propose_batch_total_l1_commit_gas",
|
||||
Help: "The total l1 commit gas",
|
||||
}),
|
||||
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_total_l1_call_data_size",
|
||||
Name: "bridge_propose_batch_total_l1_call_data_size",
|
||||
Help: "The total l1 call data size",
|
||||
}),
|
||||
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_chunks_number",
|
||||
Name: "bridge_propose_batch_chunks_number",
|
||||
Help: "The number of chunks in the batch",
|
||||
}),
|
||||
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_batch_first_block_timeout_reached_total",
|
||||
Name: "bridge_propose_batch_first_block_timeout_reached_total",
|
||||
Help: "Total times of batch's first block timeout reached",
|
||||
}),
|
||||
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_batch_chunks_propose_not_enough_total",
|
||||
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
|
||||
Help: "Total number of batch chunk propose not enough",
|
||||
}),
|
||||
}
|
||||
@@ -122,7 +122,7 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk, batchMeta *ty
|
||||
if numChunks <= 0 {
|
||||
return nil
|
||||
}
|
||||
chunks, err := p.dbChunksToRollupChunks(dbChunks)
|
||||
chunks, err := p.dbChunksToBridgeChunks(dbChunks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -269,7 +269,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (p *BatchProposer) dbChunksToRollupChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
|
||||
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
|
||||
chunks := make([]*types.Chunk, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// TODO: Add unit tests that the limits are enforced correctly.
|
||||
@@ -14,8 +14,8 @@ import (
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// maxNumBlockPerChunk is the maximum number of blocks we allow per chunk.
|
||||
@@ -98,51 +98,51 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
|
||||
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_chunk_circle_total",
|
||||
Name: "bridge_propose_chunk_circle_total",
|
||||
Help: "Total number of propose chunk total.",
|
||||
}),
|
||||
proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_chunk_failure_circle_total",
|
||||
Name: "bridge_propose_chunk_failure_circle_total",
|
||||
Help: "Total number of propose chunk failure total.",
|
||||
}),
|
||||
proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_chunk_update_info_total",
|
||||
Name: "bridge_propose_chunk_update_info_total",
|
||||
Help: "Total number of propose chunk update info total.",
|
||||
}),
|
||||
proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_chunk_update_info_failure_total",
|
||||
Name: "bridge_propose_chunk_update_info_failure_total",
|
||||
Help: "Total number of propose chunk update info failure total.",
|
||||
}),
|
||||
chunkTxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_chunk_tx_num",
|
||||
Name: "bridge_propose_chunk_tx_num",
|
||||
Help: "The chunk tx num",
|
||||
}),
|
||||
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_chunk_estimate_l1_commit_gas",
|
||||
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
|
||||
Help: "The chunk estimate l1 commit gas",
|
||||
}),
|
||||
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_chunk_total_l1_commit_call_data_size",
|
||||
Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
|
||||
Help: "The total l1 commit call data size",
|
||||
}),
|
||||
totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_chunk_total_tx_gas_used",
|
||||
Name: "bridge_propose_chunk_total_tx_gas_used",
|
||||
Help: "The total tx gas used",
|
||||
}),
|
||||
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_chunk_max_tx_consumption",
|
||||
Name: "bridge_propose_chunk_max_tx_consumption",
|
||||
Help: "The max tx consumption",
|
||||
}),
|
||||
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_chunk_chunk_block_number",
|
||||
Name: "bridge_propose_chunk_chunk_block_number",
|
||||
Help: "The number of blocks in the chunk",
|
||||
}),
|
||||
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_chunk_first_block_timeout_reached_total",
|
||||
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
|
||||
Help: "Total times of chunk's first block timeout reached",
|
||||
}),
|
||||
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_chunk_blocks_propose_not_enough_total",
|
||||
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
|
||||
Help: "Total number of chunk block propose not enough",
|
||||
}),
|
||||
}
|
||||
@@ -9,8 +9,8 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/types"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)

// TODO: Add unit tests that the limits are enforced correctly.
@@ -17,9 +17,9 @@ import (

"scroll-tech/common/types"

bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
)

type rollupEvent struct {
@@ -39,6 +39,9 @@ type L1WatcherClient struct {
// The number of new blocks to wait for a block to be confirmed
confirmations rpc.BlockNumber

messengerAddress common.Address
messengerABI *abi.ABI

messageQueueAddress common.Address
messageQueueABI *abi.ABI

@@ -54,7 +57,7 @@ type L1WatcherClient struct {
}

// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
if err != nil {
@@ -83,6 +86,9 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
batchOrm: orm.NewBatch(db),
confirmations: confirmations,

messengerAddress: messengerAddress,
messengerABI: bridgeAbi.L1ScrollMessengerABI,

messageQueueAddress: messageQueueAddress,
messageQueueABI: bridgeAbi.L1MessageQueueABI,

@@ -193,15 +199,18 @@ func (w *L1WatcherClient) FetchContractEvent() error {
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.scrollChainAddress,
w.messageQueueAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = bridgeAbi.L1QueueTransactionEventSignature
query.Topics[0][1] = bridgeAbi.L1CommitBatchEventSignature
query.Topics[0][2] = bridgeAbi.L1FinalizeBatchEventSignature
query.Topics[0][1] = bridgeAbi.L1RelayedMessageEventSignature
query.Topics[0][2] = bridgeAbi.L1FailedRelayedMessageEventSignature
query.Topics[0][3] = bridgeAbi.L1CommitBatchEventSignature
query.Topics[0][4] = bridgeAbi.L1FinalizeBatchEventSignature

logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
@@ -26,31 +26,31 @@ func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
initL1WatcherMetricOnce.Do(func() {
l1WatcherMetric = &l1WatcherMetrics{
l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_header_total",
Name: "bridge_l1_watcher_fetch_block_header_total",
Help: "The total number of l1 watcher fetch block header total",
}),
l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l1_watcher_fetch_block_header_processed_block_height",
Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
Help: "The current processed block height of l1 watcher fetch block header",
}),
l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_total",
Name: "bridge_l1_watcher_fetch_block_contract_event_total",
Help: "The total number of l1 watcher fetch contract event total",
}),
l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_success_total",
Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
Help: "The total number of l1 watcher fetch contract event success total",
}),
l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_processed_block_height",
Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
Help: "The current processed block height of l1 watcher fetch contract event",
}),
l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_sent_event_total",
Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
Help: "The current processed block height of l1 watcher fetch contract sent event",
}),
l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l1_watcher_fetch_block_contract_event_rollup_event_total",
Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
Help: "The current processed block height of l1 watcher fetch contract rollup event",
}),
}
@@ -20,9 +20,9 @@ import (
"scroll-tech/common/database"
commonTypes "scroll-tech/common/types"

bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
)

func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
@@ -30,7 +30,8 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
assert.NoError(t, watcher.FetchContractEvent())
return watcher, db
}
@@ -19,9 +19,9 @@ import (

"scroll-tech/common/types"

bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
)

// L2WatcherClient provides APIs which support others to subscribe to various events from l2geth
@@ -116,7 +116,7 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
return
}
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
w.metrics.rollupL2BlocksFetchedGap.Set(float64(blockHeight - to))
w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
}
}

@@ -246,7 +246,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}

relayedMessageCount := int64(len(relayedMessageEvents))
w.metrics.rollupL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)

// Update relayed message first to make sure we don't forget to update submitted message.
@@ -12,8 +12,8 @@ type l2WatcherMetrics struct {
fetchRunningMissingBlocksHeight prometheus.Gauge
fetchContractEventTotal prometheus.Counter
fetchContractEventHeight prometheus.Gauge
rollupL2MsgsRelayedEventsTotal prometheus.Counter
rollupL2BlocksFetchedGap prometheus.Gauge
bridgeL2MsgsRelayedEventsTotal prometheus.Counter
bridgeL2BlocksFetchedGap prometheus.Gauge
}

var (
@@ -25,27 +25,27 @@ func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
initL2WatcherMetricOnce.Do(func() {
l2WatcherMetric = &l2WatcherMetrics{
fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_fetch_running_missing_blocks_total",
Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
Help: "The total number of l2 watcher fetch running missing blocks",
}),
fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_fetch_running_missing_blocks_height",
Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
Help: "The total number of l2 watcher fetch running missing blocks height",
}),
fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_fetch_contract_events_total",
Name: "bridge_l2_watcher_fetch_contract_events_total",
Help: "The total number of l2 watcher fetch contract events",
}),
fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_fetch_contract_height",
Name: "bridge_l2_watcher_fetch_contract_height",
Help: "The total number of l2 watcher fetch contract height",
}),
rollupL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_msg_relayed_events_total",
bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_msg_relayed_events_total",
Help: "The total number of l2 watcher msg relayed event",
}),
rollupL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_watcher_blocks_fetched_gap",
bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_blocks_fetched_gap",
Help: "The gap of l2 fetch",
}),
}
@@ -24,11 +24,11 @@ import (
"scroll-tech/common/database"
cutils "scroll-tech/common/utils"

bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
"scroll-tech/rollup/mock_bridge"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge"
)

func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
@@ -51,7 +51,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {

l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.GasOracleSenderPrivateKey, "test", "test", nil)
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey, "test", "test", nil)
assert.NoError(t, err)

// Create several transactions and commit to block
@@ -72,7 +72,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t)
defer database.CloseDB(db)

auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.GasOracleSenderPrivateKey)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKey)

// deploy mock bridge
_, tx, _, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
@@ -15,7 +15,7 @@ import (

"scroll-tech/database/migrate"

"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/internal/config"
)

var (
@@ -9,7 +9,7 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"

bridgeAbi "scroll-tech/rollup/abi"
bridgeAbi "scroll-tech/bridge/abi"
)

// Keccak2 computes the keccak256 of two concatenations of bytes32
@@ -2,11 +2,8 @@ package tests

import (
"context"
"net/http"
"strings"
"testing"

"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -16,17 +13,16 @@ import (

"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/utils"

"scroll-tech/database/migrate"

bcmd "scroll-tech/rollup/cmd"
"scroll-tech/rollup/mock_bridge"
bcmd "scroll-tech/bridge/cmd"
"scroll-tech/bridge/mock_bridge"
)

var (
base *docker.App
rollupApp *bcmd.MockApp
bridgeApp *bcmd.MockApp

// clients
l1Client *ethclient.Client
@@ -36,9 +32,17 @@ var (
l1Auth *bind.TransactOpts
l2Auth *bind.TransactOpts

// l1 messenger contract
l1MessengerInstance *mock_bridge.MockBridgeL1
l1MessengerAddress common.Address

// l1 rollup contract
scrollChainInstance *mock_bridge.MockBridgeL1
scrollChainAddress common.Address

// l2 messenger contract
l2MessengerInstance *mock_bridge.MockBridgeL2
l2MessengerAddress common.Address
)

func setupDB(t *testing.T) *gorm.DB {
@@ -58,8 +62,8 @@ func setupDB(t *testing.T) *gorm.DB {

func TestMain(m *testing.M) {
base = docker.NewDockerApp()
rollupApp = bcmd.NewRollupApp(base, "../conf/config.json")
defer rollupApp.Free()
bridgeApp = bcmd.NewBridgeApp(base, "../conf/config.json")
defer bridgeApp.Free()
defer base.Free()
m.Run()
}
@@ -73,57 +77,57 @@ func setupEnv(t *testing.T) {
l2Client, err = base.L2Client()
assert.NoError(t, err)

l1Cfg, l2Cfg := rollupApp.Config.L1Config, rollupApp.Config.L2Config
l1Cfg, l2Cfg := bridgeApp.Config.L1Config, bridgeApp.Config.L2Config
l1Cfg.Confirmations = 0
l1Cfg.RelayerConfig.SenderConfig.Confirmations = 0
l2Cfg.Confirmations = 0
l2Cfg.RelayerConfig.SenderConfig.Confirmations = 0

l1Auth, err = bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, base.L1gethImg.ChainID())
l1Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKey, base.L1gethImg.ChainID())
assert.NoError(t, err)

l2Auth, err = bind.NewKeyedTransactorWithChainID(rollupApp.Config.L1Config.RelayerConfig.GasOracleSenderPrivateKey, base.L2gethImg.ChainID())
l2Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L1Config.RelayerConfig.MessageSenderPrivateKey, base.L2gethImg.ChainID())
assert.NoError(t, err)
}

func mockChainMonitorServer(baseURL string) (*http.Server, error) {
router := gin.New()
r := router.Group("/v1")
r.GET("/batch_status", func(ctx *gin.Context) {
ctx.JSON(http.StatusOK, struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data bool `json:"data"`
}{
ErrCode: 0,
ErrMsg: "",
Data: true,
})
})
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
}

func prepareContracts(t *testing.T) {
var err error
var tx *types.Transaction

// L1 messenger contract
_, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)

// L1 ScrollChain contract
_, tx, scrollChainInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
scrollChainAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)

l1Config, l2Config := rollupApp.Config.L1Config, rollupApp.Config.L2Config
l1Config.ScrollChainContractAddress = scrollChainAddress
// L2 messenger contract
_, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
assert.NoError(t, err)
l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
assert.NoError(t, err)

l1Config, l2Config := bridgeApp.Config.L1Config, bridgeApp.Config.L2Config
l1Config.L1MessengerAddress = l1MessengerAddress
l1Config.L1MessageQueueAddress = l1MessengerAddress
l1Config.ScrollChainContractAddress = scrollChainAddress
l1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress
l1Config.RelayerConfig.GasPriceOracleContractAddress = l1MessengerAddress

l2Config.L2MessengerAddress = l2MessengerAddress
l2Config.L2MessageQueueAddress = l2MessengerAddress
l2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
l2Config.RelayerConfig.RollupContractAddress = scrollChainAddress
l2Config.RelayerConfig.GasPriceOracleContractAddress = l2MessengerAddress
}

func TestFunction(t *testing.T) {
setupEnv(t)
srv, err := mockChainMonitorServer(rollupApp.Config.L2Config.RelayerConfig.ChainMonitor.BaseURL)
assert.NoError(t, err)
defer srv.Close()

// process start test
t.Run("TestProcessStart", testProcessStart)
@@ -133,6 +137,7 @@ func TestFunction(t *testing.T) {
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)

// l1 message
t.Run("TestRelayL1MessageSucceed", testRelayL1MessageSucceed)

// l2 message
// TODO: add a "user relay l2msg Succeed" test
@@ -12,9 +12,9 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/types"

"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
"scroll-tech/rollup/internal/orm"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
)

func testImportL1GasPrice(t *testing.T) {
@@ -23,7 +23,7 @@ func testImportL1GasPrice(t *testing.T) {

prepareContracts(t)

l1Cfg := rollupApp.Config.L1Config
l1Cfg := bridgeApp.Config.L1Config

// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
@@ -32,7 +32,8 @@ func testImportL1GasPrice(t *testing.T) {
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0,
l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)

// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -66,7 +67,7 @@ func testImportL2GasPrice(t *testing.T) {
defer database.CloseDB(db)
prepareContracts(t)

l2Cfg := rollupApp.Config.L2Config
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)

83
bridge/tests/l1_message_relay_test.go
Normal file
@@ -0,0 +1,83 @@
package tests

import (
"context"
"math/big"
"testing"

"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"

"scroll-tech/common/database"
"scroll-tech/common/types"

"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
)

func testRelayL1MessageSucceed(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

prepareContracts(t)

l1Cfg := bridgeApp.Config.L1Config
l2Cfg := bridgeApp.Config.L2Config

// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
assert.NoError(t, err)
// Create L1Watcher
confirmations := rpc.LatestBlockNumber
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)

// Create L2Watcher
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress,
l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db, nil)

// send message through l1 messenger contract
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
assert.NoError(t, err)
sendTx, err := l1MessengerInstance.SendMessage(l1Auth, l2Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l1Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

// l1 watch process events
l1Watcher.FetchContractEvent()

l1MessageOrm := orm.NewL1Message(db)
// check db status
msg, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, types.MsgStatus(msg.Status), types.MsgPending)
assert.Equal(t, msg.Target, l2Auth.From.String())

// process l1 messages
l1Relayer.ProcessSavedEvents()

l1Message, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.NotEmpty(t, l1Message.Layer2Hash)
assert.Equal(t, types.MsgStatus(l1Message.Status), types.MsgSubmitted)

relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(l1Message.Layer2Hash))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)

// fetch message relayed events
l2Watcher.FetchContractEvent()
msg, err = l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, types.MsgStatus(msg.Status), types.MsgConfirmed)
}
@@ -6,9 +6,10 @@ import (
"strconv"
"testing"

_ "scroll-tech/rollup/cmd/event_watcher/app"
_ "scroll-tech/rollup/cmd/gas_oracle/app"
_ "scroll-tech/rollup/cmd/rollup_relayer/app"
_ "scroll-tech/bridge/cmd/event_watcher/app"
_ "scroll-tech/bridge/cmd/gas_oracle/app"
_ "scroll-tech/bridge/cmd/msg_relayer/app"
_ "scroll-tech/bridge/cmd/rollup_relayer/app"

"scroll-tech/common/database"
cutils "scroll-tech/common/utils"
@@ -20,11 +21,12 @@ func testProcessStart(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

rollupApp.RunApp(t, cutils.EventWatcherApp)
rollupApp.RunApp(t, cutils.GasOracleApp)
rollupApp.RunApp(t, cutils.RollupRelayerApp)
bridgeApp.RunApp(t, cutils.EventWatcherApp)
bridgeApp.RunApp(t, cutils.GasOracleApp)
bridgeApp.RunApp(t, cutils.MessageRelayerApp)
bridgeApp.RunApp(t, cutils.RollupRelayerApp)

rollupApp.WaitExit()
bridgeApp.WaitExit()
}

func testProcessStartEnableMetrics(t *testing.T) {
@@ -34,17 +36,22 @@ func testProcessStartEnableMetrics(t *testing.T) {
port, err := rand.Int(rand.Reader, big.NewInt(2000))
assert.NoError(t, err)
svrPort := strconv.FormatInt(port.Int64()+50000, 10)
rollupApp.RunApp(t, cutils.EventWatcherApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
bridgeApp.RunApp(t, cutils.EventWatcherApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)

port, err = rand.Int(rand.Reader, big.NewInt(2000))
assert.NoError(t, err)
svrPort = strconv.FormatInt(port.Int64()+50000, 10)
rollupApp.RunApp(t, cutils.GasOracleApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
bridgeApp.RunApp(t, cutils.GasOracleApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)

port, err = rand.Int(rand.Reader, big.NewInt(2000))
assert.NoError(t, err)
svrPort = strconv.FormatInt(port.Int64()+50000, 10)
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
bridgeApp.RunApp(t, cutils.MessageRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)

rollupApp.WaitExit()
port, err = rand.Int(rand.Reader, big.NewInt(2000))
assert.NoError(t, err)
svrPort = strconv.FormatInt(port.Int64()+50000, 10)
bridgeApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)

bridgeApp.WaitExit()
}
@@ -14,10 +14,10 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
"scroll-tech/rollup/internal/orm"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/orm"
)

func testCommitBatchAndFinalizeBatch(t *testing.T) {
@@ -27,13 +27,14 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
prepareContracts(t)

// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)

// Create L1Watcher
l1Cfg := rollupApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
l1Cfg := bridgeApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)

// add some blocks to db
var wrappedBlocks []*types.WrappedBlock
@@ -20,7 +20,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
FROM scrolltech/go-rust-builder:go-1.19-rust-nightly-2022-12-10 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
@@ -18,7 +18,7 @@ FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
cd /src/bridge/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher

# Pull event_watcher into a second stage deploy alpine container
FROM alpine:latest

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
@@ -18,7 +18,7 @@ FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
cd /src/bridge/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle

# Pull gas_oracle into a second stage deploy alpine container
FROM alpine:latest

28
build/dockerfiles/msg_relayer.Dockerfile
Normal file
@@ -0,0 +1,28 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.19 as base

WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

# Build msg_relayer
FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge/cmd/msg_relayer/ && go build -v -p 4 -o /bin/msg_relayer

# Pull msg_relayer into a second stage deploy alpine container
FROM alpine:latest

COPY --from=builder /bin/msg_relayer /bin/

ENTRYPOINT ["msg_relayer"]
5
build/dockerfiles/msg_relayer.Dockerfile.dockerignore
Normal file
@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*
@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/

@@ -3,7 +3,7 @@ FROM scrolltech/go-alpine-builder:1.19 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
@@ -18,7 +18,7 @@ FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
cd /src/bridge/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer

# Pull rollup_relayer into a second stage deploy alpine container
FROM alpine:latest

@@ -3,7 +3,7 @@ set -uex

profile_name=$1

exclude_dirs=("scroll-tech/rollup/cmd" "scroll-tech/rollup/tests" "scroll-tech/rollup/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")
exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")

all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"

@@ -6,7 +6,7 @@ flag_management:
default_rules:
carryforward: true
individual_flags:
- name: rollup
- name: bridge
statuses:
- type: project
target: auto

@@ -5,9 +5,8 @@ import (
"os"
"os/exec"
"strings"
"sync/atomic"
"sync"

"github.com/docker/docker/pkg/reexec"
cmap "github.com/orcaman/concurrent-map"
)

@@ -27,9 +26,8 @@ type Cmd struct {
name string
args []string

isRunning uint64
cmd *exec.Cmd
app *exec.Cmd
mu sync.Mutex
cmd *exec.Cmd

checkFuncs cmap.ConcurrentMap //map[string]checkFunc

@@ -40,23 +38,13 @@ type Cmd struct {
}

// NewCmd create Cmd instance.
func NewCmd(name string, params ...string) *Cmd {
cmd := &Cmd{
func NewCmd(name string, args ...string) *Cmd {
return &Cmd{
checkFuncs: cmap.New(),
name: name,
args: params,
args: args,
ErrChan: make(chan error, 10),
cmd: exec.Command(name, params...),
app: &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{name}, params...),
},
}
cmd.cmd.Stdout = cmd
cmd.cmd.Stderr = cmd
cmd.app.Stdout = cmd
cmd.app.Stderr = cmd
return cmd
}

// RegistFunc register check func
@@ -70,14 +58,15 @@ func (c *Cmd) UnRegistFunc(key string) {
}

func (c *Cmd) runCmd() {
fmt.Println("cmd:", append([]string{c.name}, c.args...))
if atomic.CompareAndSwapUint64(&c.isRunning, 0, 1) {
c.ErrChan <- c.cmd.Run()
}
cmd := exec.Command(c.args[0], c.args[1:]...) //nolint:gosec
cmd.Stdout = c
cmd.Stderr = c
c.ErrChan <- cmd.Run()
}

// RunCmd parallel running when parallel is true.
func (c *Cmd) RunCmd(parallel bool) {
fmt.Println("cmd:", c.args)
if parallel {
go c.runCmd()
} else {

@@ -3,45 +3,41 @@ package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"sync/atomic"
"syscall"
"testing"
"time"

"github.com/docker/docker/pkg/reexec"
"github.com/stretchr/testify/assert"
)

// IsRunning 1 started, 0 not started.
func (c *Cmd) IsRunning() bool {
return atomic.LoadUint64(&c.isRunning) == 1
}

func (c *Cmd) runApp() {
fmt.Println("cmd:", append([]string{c.name}, c.args...))
if atomic.CompareAndSwapUint64(&c.isRunning, 0, 1) {
c.ErrChan <- c.app.Run()
}
}

// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (c *Cmd) RunApp(waitResult func() bool) {
fmt.Println("cmd: ", append([]string{c.name}, c.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{c.name}, c.args...),
Stderr: c,
Stdout: c,
}
if waitResult != nil {
go func() {
c.runApp()
_ = cmd.Run()
}()
waitResult()
} else {
c.runApp()
_ = cmd.Run()
}

c.mu.Lock()
c.cmd = cmd
c.mu.Unlock()
}

// WaitExit waits until the process exits.
func (c *Cmd) WaitExit() {
if atomic.LoadUint64(&c.isRunning) == 0 {
return
}
// Wait until all the check functions are finished; interrupt the loop when an error appears.
var err error
for err == nil && !c.checkFuncs.IsEmpty() {
@@ -56,18 +52,20 @@ func (c *Cmd) WaitExit() {
}

// Send interrupt signal.
_ = c.app.Process.Signal(os.Interrupt)
// should use `_ = c.app.Process.Wait()` here, but we have some bugs in coordinator's graceful exit,
c.mu.Lock()
_ = c.cmd.Process.Signal(os.Interrupt)
// should use `_ = c.cmd.Process.Wait()` here, but we have some bugs in coordinator's graceful exit,
// so we use `Kill` as a temp workaround. And since `WaitExit` is only used in integration tests, so
// it won't really affect our functionalities.
if err = c.app.Process.Signal(syscall.SIGTERM); err != nil {
_ = c.app.Process.Kill()
}
_ = c.cmd.Process.Kill()
c.mu.Unlock()
}

// Interrupt send interrupt signal.
func (c *Cmd) Interrupt() {
c.ErrChan <- c.app.Process.Signal(os.Interrupt)
c.mu.Lock()
c.ErrChan <- c.cmd.Process.Signal(os.Interrupt)
c.mu.Unlock()
}

// WaitResult return true when get the keyword during timeout.

@@ -12,7 +12,7 @@ import (
)

func TestCmd(t *testing.T) {
app := cmd.NewCmd("date", "+%Y-%m-%d")
app := cmd.NewCmd("curTime", "date", "+%Y-%m-%d")

tm := time.Now()
curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())

@@ -27,7 +27,6 @@ var (

// AppAPI app interface.
type AppAPI interface {
IsRunning() bool
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()

@@ -36,7 +36,7 @@ func NewImgDB(image, password, dbName string, port int) ImgInstance {
dbName: dbName,
port: port,
}
img.cmd = cmd.NewCmd("docker", img.prepare()...)
img.cmd = cmd.NewCmd(img.name, img.prepare()...)
return img
}

@@ -89,7 +89,7 @@ func (i *ImgDB) IsRunning() bool {
}

func (i *ImgDB) prepare() []string {
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),

@@ -42,7 +42,7 @@ func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
httpPort: hPort,
wsPort: wPort,
}
img.cmd = cmd.NewCmd("docker", img.params()...)
img.cmd = cmd.NewCmd(img.name, img.prepare()...)
return img
}

@@ -149,8 +149,8 @@ func (i *ImgGeth) Stop() error {
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
}

func (i *ImgGeth) params() []string {
cmds := []string{"run", "--rm", "--name", i.name}
func (i *ImgGeth) prepare() []string {
cmds := []string{"docker", "run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)

@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:scroll-v4.3.55
FROM scrolltech/l2geth:scroll-v4.3.34

RUN mkdir -p /l2geth/keystore


@@ -14,7 +14,7 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
gorm.io/driver/postgres v1.5.0
@@ -117,7 +117,7 @@ require (
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect

@@ -434,8 +434,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc h1:eK3NOpjgm/b2TQ6rYqWx92Zri0kBuxf6gKjjsVxWKr8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28 h1:CECBTWhZ5NGAn8lGFB4ooRAYxZns8PXoX8kTR/14c04=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@@ -576,8 +576,8 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

Some files were not shown because too many files have changed in this diff