Compare commits

..

8 Commits

Author | SHA1 | Message | Date
Péter Garamvölgyi | ff30622314 | feat: auto-finalize batches on Alpha testnet (#791) | 2023-08-15 15:41:54 +08:00
HAOYUatHZ | 4bca06a4c1 | Merge remote-tracking branch 'origin/develop' into alpha | 2023-05-09 15:52:48 +08:00
Péter Garamvölgyi | 071a7772dd | fix already executed revert message (#407) (Co-authored-by: maskpp <maskpp266@gmail.com>) | 2023-04-03 16:19:40 +02:00
Péter Garamvölgyi | 9d61149311 | Merge tag 'v3.0.1' into alpha (fetch block transaction data instead of trace) | 2023-04-03 15:32:10 +02:00
HAOYUatHZ | 0b607507b7 | Merge pull request #381 from scroll-tech/develop (merge `develop` into `alpha`) | 2023-03-21 21:36:04 +08:00
HAOYUatHZ | 462e85e591 | Merge pull request #372 from scroll-tech/develop (merge develop into alpha) | 2023-03-18 16:24:42 +08:00
HAOYUatHZ | f46391c92b | Merge pull request #370 from scroll-tech/develop (merge `develop` into `alpha`) | 2023-03-16 10:29:48 +08:00
HAOYUatHZ | 10cececb60 | Merge pull request #367 from scroll-tech/develop (merge develop into alpha) | 2023-03-15 09:07:27 +08:00
487 changed files with 56430 additions and 67323 deletions

View File

@@ -1,34 +1,7 @@
### Purpose or design rationale of this PR
*Describe your change. Make sure to answer these three questions: What does this PR do? Why does it do it? How does it do it?*
1. Purpose or design rationale of this PR
### PR title
Your PR title must follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) (as we are doing squash merge for each PR), so it must start with one of the following [types](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#type):
- [ ] build: Changes that affect the build system or external dependencies (example scopes: yarn, eslint, typescript)
- [ ] ci: Changes to our CI configuration files and scripts (example scopes: vercel, github, cypress)
- [ ] docs: Documentation-only changes
- [ ] feat: A new feature
- [ ] fix: A bug fix
- [ ] perf: A code change that improves performance
- [ ] refactor: A code change that neither fixes a bug, adds a feature, nor improves performance
- [ ] style: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc)
- [ ] test: Adding missing tests or correcting existing tests
2. Does this PR involve a new deployment with a new git tag and docker image tag? If so, has `tag` in `common/version.go` been updated?
### Deployment tag versioning
Has `tag` in `common/version.go` been updated?
- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
- [ ] Yes
### Breaking change label
Does this PR have the `breaking-change` label?
- [ ] No, this PR is not a breaking change
- [ ] Yes
3. Is this PR a breaking change? If so, has it been given the `breaking-change` label?
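As a concrete illustration of the conventional-commits rule above, the first commit in this compare view already follows it; since PRs are squash-merged, the PR title becomes the commit message:

```bash
# Example only: a title that satisfies the conventional-commits check,
# reusing the message of commit ff30622314 from the list above.
git commit -m "feat: auto-finalize batches on Alpha testnet (#791)"
```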

View File

@@ -9,73 +9,23 @@ on:
- alpha
paths:
- 'bridge/**'
- 'common/**'
- 'database/**'
- '.github/workflows/bridge.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
branches:
- main
- staging
- develop
- alpha
paths:
- 'bridge/**'
- 'common/**'
- 'database/**'
- '.github/workflows/bridge.yml'
defaults:
run:
working-directory: 'bridge'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
working-directory: 'bridge'
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
run: goimports -local scroll-tech/bridge/ -w .
working-directory: 'bridge'
- name: Run go mod tidy
run: go mod tidy
working-directory: 'bridge'
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'bridge'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
@@ -92,26 +42,31 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
- name: Lint
run: |
make dev_docker
make -C bridge mock_abi
- name: Build bridge binaries
working-directory: 'bridge'
run: |
make bridge_bins
- name: Test bridge packages
working-directory: 'bridge'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
rm -rf $HOME/.cache/golangci-lint
make mock_abi
make lint
goimports-lint:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
flags: bridge
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
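The goimports-lint job above can be approximated locally; this is a sketch that simply mirrors the workflow steps and assumes Go and git are installed:

```bash
# Local approximation of the goimports-lint job for the bridge module.
cd bridge
go install golang.org/x/tools/cmd/goimports
goimports -local scroll-tech/bridge/ -w .
go mod tidy
# Fail if goimports or go mod tidy produced any diff, as the CI step does.
if [ -n "$(git status --porcelain)" ]; then
  echo "goimports or go mod tidy produced changes" >&2
  exit 1
fi
```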

View File

@@ -1,80 +0,0 @@
name: BridgeHistoryAPI
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'bridge-history-api/**'
- '.github/workflows/bridge_history_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'bridge-history-api/**'
- '.github/workflows/bridge_history_api.yml'
defaults:
run:
working-directory: 'bridge-history-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: bridge-history-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge-history-api/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -11,18 +11,21 @@ on:
- 'common/**'
- '.github/workflows/common.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
branches:
- main
- staging
- develop
- alpha
paths:
- 'common/**'
- '.github/workflows/common.yml'
defaults:
run:
working-directory: 'common'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
@@ -33,7 +36,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
@@ -41,37 +44,10 @@ jobs:
with:
workspaces: "common/libzkp/impl -> target"
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'common'
run: goimports -local scroll-tech/common/ -w .
- name: Run go mod tidy
working-directory: 'common'
run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'common'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
@@ -80,24 +56,13 @@ jobs:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/common/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
make dev_docker
- name: Test common packages
working-directory: 'common'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: common
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -2,20 +2,10 @@ name: Contracts
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
@@ -26,7 +16,6 @@ defaults:
jobs:
foundry:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
@@ -39,7 +28,7 @@ jobs:
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly
- name: Setup LCOV
uses: hrishikesh-kadam/setup-lcov@v1
@@ -95,7 +84,6 @@ jobs:
update-comment: true
hardhat:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:

View File

@@ -9,24 +9,23 @@ on:
- alpha
paths:
- 'coordinator/**'
- 'common/**'
- 'database/**'
- '.github/workflows/coordinator.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
branches:
- main
- staging
- develop
- alpha
paths:
- 'coordinator/**'
- 'common/**'
- 'database/**'
- '.github/workflows/coordinator.yml'
defaults:
run:
working-directory: 'coordinator'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
@@ -37,41 +36,33 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
working-directory: 'coordinator'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'coordinator'
run: goimports -local scroll-tech/coordinator/ -w .
- name: Run go mod tidy
working-directory: 'coordinator'
run: go mod tidy
- run: goimports -local scroll-tech/coordinator/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'coordinator'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
@@ -86,35 +77,3 @@ jobs:
# push: false
# # cache-from: type=gha,scope=${{ github.workflow }}
# # cache-to: type=gha,scope=${{ github.workflow }}
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: coordinator

View File

@@ -9,62 +9,23 @@ on:
- alpha
paths:
- 'database/**'
- 'common/**'
- '.github/workflows/database.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
branches:
- main
- staging
- develop
- alpha
paths:
- 'database/**'
- 'common/**'
- '.github/workflows/database.yml'
defaults:
run:
working-directory: 'database'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
working-directory: 'database'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'database'
run: goimports -local scroll-tech/database/ -w .
- name: Run go mod tidy
working-directory: 'database'
run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'database'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
@@ -73,24 +34,26 @@ jobs:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
- name: Lint
run: |
make dev_docker
- name: Test database packages
working-directory: 'database'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
flags: database
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/database/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -18,6 +18,15 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator.Dockerfile
push: true
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push event_watcher docker
uses: docker/build-push-action@v2
with:
@@ -54,30 +63,3 @@ jobs:
tags: scrolltech/rollup-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push bridgehistoryapi-cross-msg-fetcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-cross-msg-fetcher.Dockerfile
push: true
tags: scrolltech/bridgehistoryapi-cross-msg-fetcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push bridgehistoryapi-server docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-server.Dockerfile
push: true
tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator.Dockerfile
push: true
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
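Each of these build-and-push steps corresponds to a plain docker build and push; for example, the coordinator image step above is roughly equivalent to the following, with a concrete tag substituted for `${{github.ref_name}}` (hypothetical local equivalent, assuming you are logged in to Docker Hub with push rights):

```bash
docker build -f ./build/dockerfiles/coordinator.Dockerfile -t scrolltech/coordinator:alpha .
docker push scrolltech/coordinator:alpha
```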

View File

@@ -1,43 +0,0 @@
name: Integration
on:
push:
branches:
- main
- staging
- develop
- alpha
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
jobs:
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
make -C bridge mock_abi
make -C common/bytecode all
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...
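Collected into one runnable sequence, the integration job above amounts to the following, assuming the same Make targets work on the host:

```bash
make dev_docker
make -C bridge mock_abi
make -C common/bytecode all
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...
```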

View File

@@ -1,59 +0,0 @@
name: Intermediate Docker
on:
workflow_dispatch:
inputs:
GO_VERSION:
description: 'Go version'
required: true
type: string
default: '1.19'
RUST_VERSION:
description: 'Rust toolchain version'
required: true
type: string
default: 'nightly-2022-12-10'
PYTHON_VERSION:
description: 'Python version'
required: false
type: string
default: '3.10'
CUDA_VERSION:
description: 'Cuda version'
required: false
type: string
default: '11.7.1'
defaults:
run:
working-directory: 'build/dockerfiles/intermediate'
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build
run: |
make all
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Publish
run: |
make publish
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
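Since this workflow only forwards its inputs to make as environment variables, the same build can be reproduced locally; a sketch using the default input values:

```bash
cd build/dockerfiles/intermediate
GO_VERSION=1.19 RUST_VERSION=nightly-2022-12-10 PYTHON_VERSION=3.10 CUDA_VERSION=11.7.1 make all
# Publishing additionally requires a docker login with push rights (credentials assumed):
GO_VERSION=1.19 RUST_VERSION=nightly-2022-12-10 PYTHON_VERSION=3.10 CUDA_VERSION=11.7.1 make publish
```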

View File

@@ -1,80 +0,0 @@
name: ProverStatsAPI
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
defaults:
run:
working-directory: 'prover-stats-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover-stats-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover-stats-api/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -11,11 +11,11 @@ on:
- 'roller/**'
- '.github/workflows/roller.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
branches:
- main
- staging
- develop
- alpha
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
@@ -26,26 +26,6 @@ defaults:
jobs:
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
@@ -56,7 +36,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
@@ -66,14 +46,16 @@ jobs:
- name: Test
run: |
make roller
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
export CHAIN_ID=534353
go test -v ./...
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
@@ -81,13 +63,12 @@ jobs:
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
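The compile job's test step can be mirrored locally; a sketch assuming the job's working directory is `roller/` and the prover library ships under `./prover/lib`:

```bash
cd roller
make roller
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
export CHAIN_ID=534353
go test -v ./...
```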

Jenkinsfile (vendored, new file, 99 lines)
View File

@@ -0,0 +1,99 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
CHAIN_ID='534353'
// LOG_DOCKER = 'true'
}
stages {
stage('Build') {
parallel {
stage('Build Prerequisite') {
steps {
sh 'make dev_docker'
sh 'make -C bridge mock_abi'
sh 'make -C common/bytecode all'
}
}
stage('Check Bridge Compilation') {
steps {
sh 'make -C bridge bridge_bins'
}
}
stage('Check Coordinator Compilation') {
steps {
sh 'export PATH=/home/ubuntu/go/bin:$PATH'
sh 'make -C coordinator coordinator'
}
}
stage('Check Database Compilation') {
steps {
sh 'make -C database db_cli'
}
}
stage('Check Database Docker Build') {
steps {
sh 'make -C database docker'
}
}
}
}
stage('Parallel Test') {
parallel{
stage('Race test common package') {
steps {
sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic scroll-tech/common/...'
}
}
stage('Race test bridge package') {
steps {
sh "cd ./bridge && ../build/run_tests.sh bridge"
}
}
stage('Race test coordinator package') {
steps {
sh "cd ./coordinator && ../build/run_tests.sh coordinator"
}
}
stage('Race test database package') {
steps {
sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic scroll-tech/database/...'
}
}
stage('Integration test') {
steps {
sh 'go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...'
}
}
}
}
stage('Compare Coverage') {
steps {
sh "./build/post-test-report-coverage.sh"
script {
currentBuild.result = 'SUCCESS'
}
step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
}
}
}
post {
always {
publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
}
}
}

View File

@@ -1,4 +1,6 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean
.PHONY: check update dev_docker clean
ZKP_VERSION=release-1220
help: ## Display this help message
@grep -h \
@@ -11,18 +13,15 @@ lint: ## The code's format and security checks.
make -C coordinator lint
make -C database lint
make -C roller lint
make -C bridge-history-api lint
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@scroll && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
@@ -32,11 +31,15 @@ dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app scroll_test_image
test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
mkdir -p test_params
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
cd ./roller && make test-gpu-prover
rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
cd ./coordinator && make test-gpu-verifier
clean: ## Empty out the bin folder
@rm -rf build/bin
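A typical local use of the targets above on an M1/M2 Mac, assuming Docker is running and the circuit-release bucket holding the test params is reachable:

```bash
make build_test_docker   # build scroll_test_image from the local testing Dockerfile
make run_test_docker     # start scroll_test_container with the repo mounted
make test_zkp            # download params/seed for ZKP_VERSION and run the prove/verify round trip
```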

View File

@@ -1,68 +1,12 @@
# Scroll Monorepo
[![codecov](https://codecov.io/gh/scroll-tech/scroll/branch/develop/graph/badge.svg?token=VJVHNQWGGW)](https://codecov.io/gh/scroll-tech/scroll)
[![Contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [![Bridge](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [![Coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [![Database](https://github.com/scroll-tech/scroll/actions/workflows/database.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [![Common](https://github.com/scroll-tech/scroll/actions/workflows/common.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [![Roller](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
## Prerequisites
+ Go 1.19
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Hardhat / Foundry
+ Docker
+ go1.18
+ rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ hardhat / foundry
To run the tests, it is essential to first pull or build the required Docker images. Execute the following commands in the root directory of the repository to do this:
---
```bash
docker pull postgres
make dev_docker
```
## Testing Bridge & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
### For Apple Silicon (M1/M2) Macs
To run tests on Apple Silicon Macs, build and execute the Docker image as outlined below:
#### Build a Docker Image for Testing
Use the following command to build a Docker image:
```bash
make build_test_docker
```
This command builds a Docker image named `scroll_test_image` using the Dockerfile found at `./build/dockerfiles/local_test.Dockerfile`.
#### Run Docker Image
After the image is built, run a Docker container from it:
```bash
make run_test_docker
```
This command runs a Docker container named `scroll_test_container` from the `scroll_test_image` image. The container uses the host network and has access to the Docker socket and the current directory.
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## Testing Contracts
You can find the unit tests in [`<REPO_DIR>/contracts/src/test/`](/contracts/src/test/), and integration tests in [`<REPO_DIR>/contracts/integration-test/`](/contracts/integration-test/).
For more details on contracts, see [`/contracts`](/contracts).
For a more comprehensive doc, see [`docs/`](./docs).

assets/.gitkeep (new empty file)
View File

View File

@@ -1 +0,0 @@
/build/bin

View File

@@ -1,26 +0,0 @@
.PHONY: lint
REPO_ROOT_DIR=./..
IMAGE_VERSION=latest
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
bridgehistoryapi-db-cli:
go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli
bridgehistoryapi-cross-msg-fetcher:
go build -o $(PWD)/build/bin/bridgehistoryapi-cross-msg-fetcher ./cmd/cross_msg_fetcher
bridgehistoryapi-server:
go build -o $(PWD)/build/bin/bridgehistoryapi-server ./cmd/backend_server
db-docker:
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
bridgehistoryapi-docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-cross-msg-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-cross-msg-fetcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-server:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-server.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
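A sketch of how these targets fit together locally, assuming Docker is available (the db-docker target starts postgres on port 5444):

```bash
make db-docker
make bridgehistoryapi-db-cli bridgehistoryapi-cross-msg-fetcher bridgehistoryapi-server
make test
```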

View File

@@ -1 +0,0 @@
# bridge-history-api

File diff suppressed because one or more lines are too long

View File

@@ -1,134 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/iris-contrib/middleware/cors"
"github.com/kataras/iris/v12"
"github.com/kataras/iris/v12/mvc"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"bridge-history-api/config"
"bridge-history-api/controller"
"bridge-history-api/service"
cutils "bridge-history-api/utils"
"scroll-tech/common/database"
)
var (
app *cli.App
db *gorm.DB
subCtx context.Context
)
func pong(ctx iris.Context) {
_, err := ctx.WriteString("pong")
if err != nil {
log.Error("failed to write pong", "err", err)
}
}
func setupQueryByAddressHandler(backendApp *mvc.Application) {
// Register Dependencies.
backendApp.Register(
subCtx,
db,
service.NewHistoryService,
)
// Register Controllers.
backendApp.Handle(new(controller.QueryAddressController))
}
func setupQueryClaimableHandler(backendApp *mvc.Application) {
// Register Dependencies.
backendApp.Register(
subCtx,
db,
service.NewHistoryService,
)
// Register Controllers.
backendApp.Handle(new(controller.QueryClaimableController))
}
func setupQueryByHashHandler(backendApp *mvc.Application) {
backendApp.Register(
subCtx,
db,
service.NewHistoryService,
)
backendApp.Handle(new(controller.QueryHashController))
}
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "Scroll Bridge History Web Service"
app.Usage = "The Scroll Bridge History Web Service"
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
corsOptions := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE"},
AllowCredentials: true,
})
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
dbCfg := &database.Config{
DriverName: cfg.DB.DriverName,
DSN: cfg.DB.DSN,
MaxOpenNum: cfg.DB.MaxOpenNum,
MaxIdleNum: cfg.DB.MaxIdleNum,
}
db, err = database.InitDB(dbCfg)
if err != nil {
log.Crit("failed to init db", "err", err)
}
defer func() {
if deferErr := database.CloseDB(db); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
subCtx = ctx.Context
bridgeApp := iris.New()
bridgeApp.UseRouter(corsOptions)
bridgeApp.Get("/ping", pong).Describe("healthcheck")
mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler)
mvc.Configure(bridgeApp.Party("/api/claimable"), setupQueryClaimableHandler)
// TODO: make debug mode configurable
err = bridgeApp.Listen(cfg.Server.HostPort, iris.WithLogLevel("debug"))
if err != nil {
log.Crit("can not start server", "err", err)
}
return nil
}
// Run runs the backend server cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +0,0 @@
package main
import "bridge-history-api/cmd/backend_server/app"
func main() {
app.Run()
}

View File

@@ -1,152 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
"bridge-history-api/config"
"bridge-history-api/crossmsg"
"bridge-history-api/crossmsg/messageproof"
"bridge-history-api/orm"
cutils "bridge-history-api/utils"
"scroll-tech/common/database"
)
var (
app *cli.App
)
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "Scroll Bridge History API"
app.Usage = "The Scroll Bridge Web Backend"
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
defer cancel()
l1client, err := ethclient.Dial(cfg.L1.Endpoint)
if err != nil {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
}
l2client, err := ethclient.Dial(cfg.L2.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
dbCfg := &database.Config{
DriverName: cfg.DB.DriverName,
DSN: cfg.DB.DSN,
MaxOpenNum: cfg.DB.MaxOpenNum,
MaxIdleNum: cfg.DB.MaxIdleNum,
}
db, err := database.InitDB(dbCfg)
if err != nil {
log.Crit("failed to init db", "err", err)
}
defer func() {
if deferErr := database.CloseDB(db); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
if err != nil {
log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
}
l1worker := &crossmsg.FetchEventWorker{F: crossmsg.L1FetchAndSaveEvents, G: crossmsg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
l2worker := &crossmsg.FetchEventWorker{F: crossmsg.L2FetchAndSaveEvents, G: crossmsg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}
l1AddressList := []common.Address{
common.HexToAddress(cfg.L1.CustomERC20GatewayAddr),
common.HexToAddress(cfg.L1.ERC721GatewayAddr),
common.HexToAddress(cfg.L1.ERC1155GatewayAddr),
common.HexToAddress(cfg.L1.MessengerAddr),
common.HexToAddress(cfg.L1.ETHGatewayAddr),
common.HexToAddress(cfg.L1.StandardERC20Gateway),
common.HexToAddress(cfg.L1.WETHGatewayAddr),
}
l2AddressList := []common.Address{
common.HexToAddress(cfg.L2.CustomERC20GatewayAddr),
common.HexToAddress(cfg.L2.ERC721GatewayAddr),
common.HexToAddress(cfg.L2.ERC1155GatewayAddr),
common.HexToAddress(cfg.L2.MessengerAddr),
common.HexToAddress(cfg.L2.ETHGatewayAddr),
common.HexToAddress(cfg.L2.StandardERC20Gateway),
common.HexToAddress(cfg.L2.WETHGatewayAddr),
}
l1crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, crossmsg.L1ReorgHandling)
if err != nil {
log.Crit("failed to create l1 cross message fetcher", "error", err)
}
go l1crossMsgFetcher.Start()
defer l1crossMsgFetcher.Stop()
l2crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, crossmsg.L2ReorgHandling)
if err != nil {
log.Crit("failed to create l2 cross message fetcher", "error", err)
}
go l2crossMsgFetcher.Start()
defer l2crossMsgFetcher.Stop()
CrossMsgOrm := orm.NewCrossMsg(db)
// BlockTimestamp fetcher for l1 and l2
l1BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, CrossMsgOrm.UpdateL1BlockTimestamp, CrossMsgOrm.GetL1EarliestNoBlockTimestampHeight)
go l1BlockTimeFetcher.Start()
defer l1BlockTimeFetcher.Stop()
l2BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, CrossMsgOrm.UpdateL2BlockTimestamp, CrossMsgOrm.GetL2EarliestNoBlockTimestampHeight)
go l2BlockTimeFetcher.Start()
defer l2BlockTimeFetcher.Stop()
// Proof updater and batch fetcher
l2msgProofUpdater := messageproof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
batchFetcher := crossmsg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
go batchFetcher.Start()
defer batchFetcher.Stop()
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run runs the cross message fetcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +0,0 @@
package main
import "bridge-history-api/cmd/cross_msg_fetcher/app"
func main() {
app.Run()
}

View File

@@ -1,75 +0,0 @@
package app
import (
"fmt"
"os"
"github.com/urfave/cli/v2"
"bridge-history-api/utils"
)
var (
// Set up database app info.
app *cli.App
)
func init() {
app = cli.NewApp()
// Set up database app info.
app.Name = "db_cli"
app.Usage = "The Scroll Bridge-history-api DB CLI"
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
app.Commands = []*cli.Command{
{
Name: "reset",
Usage: "Clean and reset database.",
Action: resetDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "status",
Usage: "Check migration status.",
Action: checkDBStatus,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "version",
Usage: "Display the current database version.",
Action: dbVersion,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: migrateDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: rollbackDB,
Flags: []cli.Flag{
&utils.ConfigFileFlag,
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",
Value: 0,
}},
},
}
}
// Run database cmd instance.
func Run() {
// Run the db_cli app.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
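The subcommands registered above translate into invocations like the following; the binary path comes from the bridge-history-api Makefile, and the config flag name is hypothetical (the code only references `utils.ConfigFileFlag`):

```bash
./build/bin/bridgehistoryapi-db-cli migrate --config ./config.json    # flag name assumed
./build/bin/bridgehistoryapi-db-cli status --config ./config.json
./build/bin/bridgehistoryapi-db-cli rollback --config ./config.json --version 1
```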

View File

@@ -1,126 +0,0 @@
package app
import (
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"bridge-history-api/config"
"bridge-history-api/orm/migrate"
"bridge-history-api/utils"
"scroll-tech/common/database"
)
func getConfig(ctx *cli.Context) (*config.Config, error) {
file := ctx.String(utils.ConfigFileFlag.Name)
dbCfg, err := config.NewConfig(file)
if err != nil {
return nil, err
}
return dbCfg, nil
}
func initDB(dbCfg *config.DBConfig) (*gorm.DB, error) {
cfg := &database.Config{
DriverName: dbCfg.DriverName,
DSN: dbCfg.DSN,
MaxOpenNum: dbCfg.MaxOpenNum,
MaxIdleNum: dbCfg.MaxIdleNum,
}
return database.InitDB(cfg)
}
// resetDB cleans or resets the database.
func resetDB(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
err = migrate.ResetDB(db)
if err != nil {
return err
}
log.Info("successful to reset")
return nil
}
// checkDBStatus checks the db status
func checkDBStatus(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
return migrate.Status(db)
}
// dbVersion returns the latest version
func dbVersion(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
version, err := migrate.Current(db)
log.Info("show database version", "db version", version)
return err
}
// migrateDB migrates the db
func migrateDB(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
return migrate.Migrate(db)
}
// rollbackDB rolls back the db by version
func rollbackDB(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
version := ctx.Int64("version")
return migrate.Rollback(db, &version)
}

View File

@@ -1,7 +0,0 @@
package main
import "bridge-history-api/cmd/db_cli/app"
func main() {
app.Run()
}

View File

@@ -1,41 +0,0 @@
{
"batchInfoFetcher": {
"batchIndexStartBlock": 9091265,
"ScrollChainAddr": "0xcD00DB804C819175B381b2B44Aa16A391c8a01D6"
},
"l1": {
"confirmation": 64,
"endpoint": "https://rpc.ankr.com/eth_goerli",
"startHeight": 9090194 ,
"blockTime": 10,
"MessengerAddr": "0x326517Eb8eB1Ce5eaB5b513C2e9A24839b402d90",
"ETHGatewayAddr": "0x8305cB7B8448677736095965B63d7431017328fe",
"WETHGatewayAddr": "0xe3bA3c60d99a2d9a5f817734bC85353470b23931",
"StandardERC20Gateway": "0x16c1079B27eD9c363B7D08aC5Ae937A398972A5C",
"CustomERC20GatewayAddr": "0x61f08caD3d6F77801167d3bA8669433701586643",
"ERC721GatewayAddr": "0x4A73D25A4C99CB912acaf6C5B5e554f2982201c5",
"ERC1155GatewayAddr": "0xa3F5DD3033698c2832C53f3C3Fe6E062F58cD808"
},
"l2": {
"confirmation": 1,
"endpoint": "http://staging-l2geth-rpc0.scroll.tech:8545",
"blockTime": 3,
"startHeight": 0,
"CustomERC20GatewayAddr": "0x905db21f836749fEeD12de781afc4A5Ab4Dd0d51",
"ERC721GatewayAddr": "0xC53D835514780664BCd7eCfcE7c2E5d9554dc41B",
"StandardERC20Gateway": "0x90271634BCB020e06ea4840C3f7aa61b8F860651",
"MessengerAddr": "0xE8b0956Ac75c65Aa1669e83888DA13afF2E108f4",
"ETHGatewayAddr": "0xD5938590D5dD8ce95812D4D515a219C12C551D67",
"WETHGatewayAddr": "0xb0aaA582564fade4232a16fdB1383004A6A7247F",
"ERC1155GatewayAddr": "0x4f33B1655619c2C0B7C450128Df760B4365Cb549"
},
"db": {
"dsn": "postgres://postgres:1234@localhost:5444/test?sslmode=disable",
"driverName": "postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"server": {
"hostPort": "0.0.0.0:20006"
}
}

View File

@@ -1,71 +0,0 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
)
// BatchInfoFetcherConfig is the configuration of BatchInfoFetcher
type BatchInfoFetcherConfig struct {
BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"`
ScrollChainAddr string `json:"ScrollChainAddr"`
}
// DBConfig db config
type DBConfig struct {
// data source name
DSN string `json:"dsn"`
DriverName string `json:"driverName"`
MaxOpenNum int `json:"maxOpenNum"`
MaxIdleNum int `json:"maxIdleNum"`
}
// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct {
Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"`
StartHeight uint64 `json:"startHeight"`
BlockTime int64 `json:"blockTime"`
MessengerAddr string `json:"MessengerAddr"`
ETHGatewayAddr string `json:"ETHGatewayAddr"`
WETHGatewayAddr string `json:"WETHGatewayAddr"`
StandardERC20Gateway string `json:"StandardERC20Gateway"`
ERC721GatewayAddr string `json:"ERC721GatewayAddr"`
ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
}
// ServerConfig is the configuration of the bridge history backend server port
type ServerConfig struct {
HostPort string `json:"hostPort"`
}
// Config is the configuration of the bridge history backend
type Config struct {
// chain config
L1 *LayerConfig `json:"l1"`
L2 *LayerConfig `json:"l2"`
// data source name
DB *DBConfig `json:"db"`
Server *ServerConfig `json:"server"`
BatchInfoFetcher *BatchInfoFetcherConfig `json:"batchInfoFetcher"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &Config{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
return cfg, nil
}

View File

@@ -1,60 +0,0 @@
package controller
import (
"bridge-history-api/model"
"bridge-history-api/service"
"github.com/ethereum/go-ethereum/common"
)
// QueryAddressController contains the query by address service
type QueryAddressController struct {
Service service.HistoryService
}
// QueryHashController contains the query by hash service
type QueryHashController struct {
Service service.HistoryService
}
// QueryClaimableController contains the query claimable txs service
type QueryClaimableController struct {
Service service.HistoryService
}
// Get defines the http get method behavior for QueryClaimableController
func (c *QueryClaimableController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
txs, total, err := c.Service.GetClaimableTxsByAddress(common.HexToAddress(req.Address), req.Offset, req.Limit)
if err != nil {
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByAddressResponse{Message: "ok",
Data: &model.Data{
Result: txs,
Total: total,
}}, nil
}
// Get defines the http get method behavior for QueryAddressController
func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), req.Offset, req.Limit)
if err != nil {
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByAddressResponse{Message: "ok",
Data: &model.Data{
Result: message,
Total: total,
}}, nil
}
// Post defines the http post method behavior for QueryHashController
func (c *QueryHashController) Post(req model.QueryByHashRequest) (*model.QueryByHashResponse, error) {
result, err := c.Service.GetTxsByHashes(req.Txs)
if err != nil {
return &model.QueryByHashResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByHashResponse{Message: "ok", Data: &model.Data{Result: result}}, nil
}

View File

@@ -1,113 +0,0 @@
package crossmsg
import (
"context"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
"bridge-history-api/crossmsg/messageproof"
"bridge-history-api/orm"
"bridge-history-api/utils"
)
// BatchInfoFetcher fetches batch info from l1 chain and update db
type BatchInfoFetcher struct {
ctx context.Context
scrollChainAddr common.Address
batchInfoStartNumber uint64
confirmation uint64
blockTimeInSec int
client *ethclient.Client
db *gorm.DB
rollupOrm *orm.RollupBatch
msgProofUpdater *messageproof.MsgProofUpdater
}
// NewBatchInfoFetcher creates a new BatchInfoFetcher instance
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db *gorm.DB, msgProofUpdater *messageproof.MsgProofUpdater) *BatchInfoFetcher {
return &BatchInfoFetcher{
ctx: ctx,
scrollChainAddr: scrollChainAddr,
batchInfoStartNumber: batchInfoStartNumber,
confirmation: confirmation,
blockTimeInSec: blockTimeInSec,
client: client,
db: db,
rollupOrm: orm.NewRollupBatch(db),
msgProofUpdater: msgProofUpdater,
}
}
// Start the BatchInfoFetcher
func (b *BatchInfoFetcher) Start() {
log.Info("BatchInfoFetcher Start")
// Fetch batch info at the beginning.
// Then start the msg proof updater once the db has some bridge batches.
err := b.fetchBatchInfo()
if err != nil {
log.Error("fetch batch info at beginning failed: ", "err", err)
}
go b.msgProofUpdater.Start()
go func() {
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
for {
select {
case <-b.ctx.Done():
tick.Stop()
return
case <-tick.C:
err := b.fetchBatchInfo()
if err != nil {
log.Error("fetch batch info failed: ", "err", err)
}
}
}
}()
}
// Stop the BatchInfoFetcher and call msg proof updater to stop
func (b *BatchInfoFetcher) Stop() {
log.Info("BatchInfoFetcher Stop")
b.msgProofUpdater.Stop()
}
func (b *BatchInfoFetcher) fetchBatchInfo() error {
number, err := utils.GetSafeBlockNumber(b.ctx, b.client, b.confirmation)
if err != nil {
log.Error("Can not get latest block number: ", "err", err)
return err
}
latestBatchHeight, err := b.rollupOrm.GetLatestRollupBatchProcessedHeight(b.ctx)
if err != nil {
log.Error("Can not get latest BatchInfo: ", "err", err)
return err
}
var startHeight uint64
if latestBatchHeight == 0 {
log.Info("no batch record in database, start from batchInfoStartNumber", "batchInfoStartNumber", b.batchInfoStartNumber)
startHeight = b.batchInfoStartNumber
} else {
startHeight = latestBatchHeight + 1
}
for from := startHeight; number >= from; from += fetchLimit {
to := from + fetchLimit - 1
// number - confirmation can never be less than 0 given the for loop condition,
// but watch out for overflow
if to > number {
to = number
}
// filter logs to fetch batches
err = FetchAndSaveBatchIndex(b.ctx, b.client, b.db, int64(from), int64(to), b.scrollChainAddr)
if err != nil {
log.Error("Can not fetch and save from chain: ", "err", err)
return err
}
}
return nil
}

View File

@@ -1,85 +0,0 @@
package crossmsg
import (
"context"
"math/big"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
// GetEarliestNoBlockTimestampHeightFunc is a function type that gets the earliest record without block timestamp from database
type GetEarliestNoBlockTimestampHeightFunc func(ctx context.Context) (uint64, error)
// UpdateBlockTimestampFunc is a function type that updates block timestamp into database
type UpdateBlockTimestampFunc func(ctx context.Context, height uint64, timestamp time.Time) error
// BlockTimestampFetcher fetches block timestamp from blockchain and saves them to database
type BlockTimestampFetcher struct {
ctx context.Context
confirmation uint64
blockTimeInSec int
client *ethclient.Client
updateBlockTimestampFunc UpdateBlockTimestampFunc
getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc
}
// NewBlockTimestampFetcher creates a new BlockTimestampFetcher instance
func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher {
return &BlockTimestampFetcher{
ctx: ctx,
confirmation: confirmation,
blockTimeInSec: blockTimeInSec,
client: client,
getEarliestNoBlockTimestampHeightFunc: getEarliestNoBlockTimestampHeightFunc,
updateBlockTimestampFunc: updateBlockTimestampFunc,
}
}
// Start the BlockTimestampFetcher
func (b *BlockTimestampFetcher) Start() {
go func() {
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
for {
select {
case <-b.ctx.Done():
tick.Stop()
return
case <-tick.C:
number, err := b.client.BlockNumber(b.ctx)
if err != nil {
log.Error("Can not get latest block number", "err", err)
continue
}
startHeight, err := b.getEarliestNoBlockTimestampHeightFunc(b.ctx)
if err != nil {
log.Error("Can not get latest record without block timestamp", "err", err)
continue
}
for height := startHeight; number >= height+b.confirmation && height > 0; {
block, err := b.client.HeaderByNumber(b.ctx, new(big.Int).SetUint64(height))
if err != nil {
log.Error("Can not get block by number", "err", err)
break
}
err = b.updateBlockTimestampFunc(b.ctx, height, time.Unix(int64(block.Time), 0))
if err != nil {
log.Error("Can not update blockTimestamp into DB ", "err", err)
break
}
height, err = b.getEarliestNoBlockTimestampHeightFunc(b.ctx)
if err != nil {
log.Error("Can not get latest record without block timestamp", "err", err)
break
}
}
}
}
}()
}
// Stop the BlockTimestampFetcher and log the info
func (b *BlockTimestampFetcher) Stop() {
log.Info("BlockTimestampFetcher Stop")
}

View File

@@ -1,215 +0,0 @@
package crossmsg
import (
"context"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/modern-go/reflect2"
"gorm.io/gorm"
"bridge-history-api/config"
"bridge-history-api/utils"
)
// MsgFetcher fetches cross message events from blockchain and saves them to database
type MsgFetcher struct {
ctx context.Context
config *config.LayerConfig
db *gorm.DB
client *ethclient.Client
worker *FetchEventWorker
reorgHandling ReorgHandling
addressList []common.Address
cachedHeaders []*types.Header
mu sync.Mutex
reorgStartCh chan struct{}
reorgEndCh chan struct{}
}
// NewMsgFetcher creates a new MsgFetcher instance
func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db *gorm.DB, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*MsgFetcher, error) {
msgFetcher := &MsgFetcher{
ctx: ctx,
config: config,
db: db,
client: client,
worker: worker,
reorgHandling: reorg,
addressList: addressList,
cachedHeaders: make([]*types.Header, 0),
reorgStartCh: make(chan struct{}),
reorgEndCh: make(chan struct{}),
}
return msgFetcher, nil
}
// Start the MsgFetcher
func (c *MsgFetcher) Start() {
log.Info("MsgFetcher Start")
// fetch missing events from finalized blocks, we don't handle reorgs here
c.forwardFetchAndSaveMissingEvents(c.config.Confirmation)
tick := time.NewTicker(time.Duration(c.config.BlockTime) * time.Second)
headerTick := time.NewTicker(time.Duration(c.config.BlockTime/2) * time.Second)
go func() {
for {
select {
case <-c.reorgStartCh:
// create timeout here
timeout := time.NewTicker(300 * time.Second)
select {
case <-c.reorgEndCh:
log.Info("Reorg finished")
timeout.Stop()
case <-timeout.C:
// TODO: need to notify the on-call members to handle reorg manually
timeout.Stop()
log.Crit("Reorg timeout")
}
case <-c.ctx.Done():
tick.Stop()
return
case <-tick.C:
c.mu.Lock()
c.forwardFetchAndSaveMissingEvents(1)
c.mu.Unlock()
}
}
}()
go func() {
for {
select {
case <-c.ctx.Done():
headerTick.Stop()
return
case <-headerTick.C:
c.fetchMissingLatestHeaders()
}
}
}()
}
// Stop the MsgFetcher and log the info
func (c *MsgFetcher) Stop() {
log.Info("MsgFetcher Stop")
}
// forwardFetchAndSaveMissingEvents will fetch all events from the latest processed height to the latest block number.
func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
// when fetching up to the latest block, do not go past the cached headers
var number uint64
var err error
if len(c.cachedHeaders) != 0 && confirmation == 0 {
number = c.cachedHeaders[len(c.cachedHeaders)-1].Number.Uint64() - 1
} else {
number, err = utils.GetSafeBlockNumber(c.ctx, c.client, confirmation)
if err != nil {
log.Error(fmt.Sprintf("%s: can not get the safe block number", c.worker.Name), "err", err)
return
}
}
if reflect2.IsNil(c.worker.G) || reflect2.IsNil(c.worker.F) {
log.Error(fmt.Sprintf("%s: invalid get/fetch function", c.worker.Name))
return
}
processedHeight, err := c.worker.G(c.ctx, c.db)
if err != nil {
log.Error(fmt.Sprintf("%s: can not get latest processed block height", c.worker.Name))
}
log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processedHeight)
if processedHeight <= 0 || processedHeight < c.config.StartHeight {
processedHeight = c.config.StartHeight
} else {
processedHeight++
}
for from := processedHeight; from <= number; from += fetchLimit {
to := from + fetchLimit - 1
if to > number {
to = number
}
// watch for overflow here, though it's unlikely to happen
err := c.worker.F(c.ctx, c.client, c.db, int64(from), int64(to), c.addressList)
if err != nil {
log.Error(fmt.Sprintf("%s: failed!", c.worker.Name), "err", err)
break
}
}
}
func (c *MsgFetcher) fetchMissingLatestHeaders() {
var start int64
number, err := c.client.BlockNumber(c.ctx)
if err != nil {
log.Error("fetchMissingLatestHeaders(): can not get the latest block number", "err", err)
return
}
if len(c.cachedHeaders) > 0 {
start = c.cachedHeaders[len(c.cachedHeaders)-1].Number.Int64() + 1
} else {
start = int64(number - c.config.Confirmation)
}
for i := start; i <= int64(number); i++ {
select {
case <-c.ctx.Done():
close(c.reorgStartCh)
close(c.reorgEndCh)
return
default:
header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(i))
if err != nil {
log.Error("failed to get latest header", "err", err)
return
}
if len(c.cachedHeaders) == 0 {
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, []*types.Header{header}, int(c.config.Confirmation))
return
}
// check if the fetched header is a child of the last cached header
if IsParentAndChild(c.cachedHeaders[len(c.cachedHeaders)-1], header) {
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, []*types.Header{header}, int(c.config.Confirmation))
log.Debug("fetched block into cache", "height", header.Number, "parent hash", header.ParentHash.Hex(), "block hash", c.cachedHeaders[len(c.cachedHeaders)-1].Hash().Hex(), "len", len(c.cachedHeaders))
continue
}
// reorg happened
log.Warn("Reorg happened", "height", header.Number, "parent hash", header.ParentHash.Hex(), "last cached hash", c.cachedHeaders[len(c.cachedHeaders)-1].Hash().Hex(), "last cached height", c.cachedHeaders[len(c.cachedHeaders)-1].Number)
c.reorgStartCh <- struct{}{}
// wait here if a fetcher is currently running
c.mu.Lock()
index, ok, validHeaders := BackwardFindReorgBlock(c.ctx, c.cachedHeaders, c.client, header)
if !ok {
log.Error("Reorg happened too earlier than cached headers", "reorg height", header.Number)
num, getSafeErr := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
if getSafeErr != nil {
log.Crit("Can not get safe number during reorg, quit the process", "err", err)
}
// clear all our saved data, because no data is safe now
err = c.reorgHandling(c.ctx, num, c.db)
// if handling succeeded, we can update the cachedHeaders
if err == nil {
c.cachedHeaders = c.cachedHeaders[:0]
}
c.mu.Unlock()
c.reorgEndCh <- struct{}{}
return
}
err = c.reorgHandling(c.ctx, c.cachedHeaders[index].Number.Uint64(), c.db)
// if handling succeeded, we can update the cachedHeaders
if err == nil {
c.cachedHeaders = c.cachedHeaders[:index+1]
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, validHeaders, int(c.config.Confirmation))
}
c.mu.Unlock()
c.reorgEndCh <- struct{}{}
}
}
}

View File

@@ -1,213 +0,0 @@
package crossmsg
import (
"context"
"math/big"
geth "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
backendabi "bridge-history-api/abi"
"bridge-history-api/orm"
"bridge-history-api/utils"
)
// TODO: read from config
var (
// the number of blocks fetched per round
fetchLimit = uint64(3000)
)
// FetchAndSave is a function type that fetches events from blockchain and saves them to database
type FetchAndSave func(ctx context.Context, client *ethclient.Client, database *gorm.DB, from int64, to int64, addressList []common.Address) error
// GetLatestProcessed is a function type that gets the latest processed block height from database
type GetLatestProcessed func(ctx context.Context, db *gorm.DB) (uint64, error)
// FetchEventWorker defines worker with fetch and save function, processed number getter, and name
type FetchEventWorker struct {
F FetchAndSave
G GetLatestProcessed
Name string
}
// GetLatestL1ProcessedHeight get L1 the latest processed height
func GetLatestL1ProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
l1CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
crossHeight, err := l1CrossMsgOrm.GetLatestL1ProcessedHeight(ctx)
if err != nil {
log.Error("failed to get L1 cross message processed height: ", "err", err)
return 0, err
}
relayedHeight, err := relayedOrm.GetLatestRelayedHeightOnL1(ctx)
if err != nil {
log.Error("failed to get L1 relayed message processed height: ", "err", err)
return 0, err
}
if crossHeight > relayedHeight {
return crossHeight, nil
}
return relayedHeight, nil
}
// GetLatestL2ProcessedHeight get L2 latest processed height
func GetLatestL2ProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
l2CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
l2SentMsgOrm := orm.NewL2SentMsg(db)
crossHeight, err := l2CrossMsgOrm.GetLatestL2ProcessedHeight(ctx)
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, err
}
relayedHeight, err := relayedOrm.GetLatestRelayedHeightOnL2(ctx)
if err != nil {
log.Error("failed to get L2 relayed message processed height", "err", err)
return 0, err
}
l2SentHeight, err := l2SentMsgOrm.GetLatestSentMsgHeightOnL2(ctx)
if err != nil {
log.Error("failed to get L2 sent message processed height", "err", err)
return 0, err
}
maxHeight := crossHeight
if maxHeight < relayedHeight {
maxHeight = relayedHeight
}
if maxHeight < l2SentHeight {
maxHeight = l2SentHeight
}
return maxHeight, nil
}
// L1FetchAndSaveEvents fetches and saves events on L1
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, addrList []common.Address) error {
l1CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: addrList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1RelayedMessageEventSignature
query.Topics[0][3] = backendabi.L1SentMessageEventSignature
query.Topics[0][4] = backendabi.L1DepositERC721Sig
query.Topics[0][5] = backendabi.L1DepositERC1155Sig
query.Topics[0][6] = backendabi.L1DepositWETHSig
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get l1 event logs", "err", err)
return err
}
depositL1CrossMsgs, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
if err != nil {
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
err = db.Transaction(func(tx *gorm.DB) error {
if txErr := l1CrossMsgOrm.InsertL1CrossMsg(ctx, depositL1CrossMsgs, tx); txErr != nil {
log.Error("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", txErr)
return txErr
}
if txErr := relayedOrm.InsertRelayedMsg(ctx, relayedMsg, tx); txErr != nil {
log.Error("l1FetchAndSaveEvents: Failed to insert relayed msg event logs", "err", txErr)
return txErr
}
return nil
})
if err != nil {
log.Crit("l2FetchAndSaveEvents: Failed to finish transaction", "err", err)
}
return err
}
// L2FetchAndSaveEvents fetches and saves events on L2
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, addrList []common.Address) error {
l2CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
l2SentMsgOrm := orm.NewL2SentMsg(db)
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: addrList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2RelayedMessageEventSignature
query.Topics[0][3] = backendabi.L2SentMessageEventSignature
query.Topics[0][4] = backendabi.L2WithdrawERC721Sig
query.Topics[0][5] = backendabi.L2WithdrawERC1155Sig
query.Topics[0][6] = backendabi.L2WithdrawWETHSig
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get l2 event logs", "err", err)
return err
}
depositL2CrossMsgs, relayedMsg, l2SentMsgs, err := utils.ParseBackendL2EventLogs(logs)
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
err = db.Transaction(func(tx *gorm.DB) error {
if txErr := l2CrossMsgOrm.InsertL2CrossMsg(ctx, depositL2CrossMsgs, tx); txErr != nil {
log.Error("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", txErr)
return txErr
}
if txErr := relayedOrm.InsertRelayedMsg(ctx, relayedMsg, tx); txErr != nil {
log.Error("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", txErr)
return txErr
}
if txErr := l2SentMsgOrm.InsertL2SentMsg(ctx, l2SentMsgs, tx); txErr != nil {
log.Error("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", txErr)
return txErr
}
return nil
})
if err != nil {
log.Crit("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
}
return err
}
// FetchAndSaveBatchIndex fetches and saves batch indices
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, scrollChainAddr common.Address) error {
rollupBatchOrm := orm.NewRollupBatch(db)
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{scrollChainAddr},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = backendabi.L1CommitBatchEventSignature
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get batch commit event logs", "err", err)
return err
}
rollupBatches, err := utils.ParseBatchInfoFromScrollChain(ctx, client, logs)
if err != nil {
log.Error("FetchAndSaveBatchIndex: Failed to parse batch commit msg event logs", "err", err)
return err
}
if txErr := rollupBatchOrm.InsertRollupBatch(ctx, rollupBatches); txErr != nil {
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", txErr)
return txErr
}
return nil
}
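
A hedged sketch of how the worker definitions above plug into the MsgFetcher from the previous file. The caller is assumed to supply an initialised *gorm.DB, a populated LayerConfig and the L1 contract addresses to watch; the worker name is a placeholder.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"gorm.io/gorm"

	"bridge-history-api/config"
	"bridge-history-api/crossmsg"
)

// startL1Fetcher wires L1FetchAndSaveEvents and GetLatestL1ProcessedHeight into a MsgFetcher.
func startL1Fetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client, addrs []common.Address) (*crossmsg.MsgFetcher, error) {
	worker := &crossmsg.FetchEventWorker{
		F:    crossmsg.L1FetchAndSaveEvents,
		G:    crossmsg.GetLatestL1ProcessedHeight,
		Name: "L1 event fetcher", // hypothetical worker name
	}
	fetcher, err := crossmsg.NewMsgFetcher(ctx, cfg, db, client, worker, addrs, crossmsg.L1ReorgHandling)
	if err != nil {
		return nil, err
	}
	fetcher.Start() // runs the catch-up pass plus the per-block and header-cache loops
	return fetcher, nil
}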

View File

@@ -1,237 +0,0 @@
package messageproof
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
"bridge-history-api/orm"
)
// MsgProofUpdater is used to update message proof in db
type MsgProofUpdater struct {
ctx context.Context
db *gorm.DB
l2SentMsgOrm *orm.L2SentMsg
rollupOrm *orm.RollupBatch
withdrawTrie *WithdrawTrie
}
// NewMsgProofUpdater creates a new MsgProofUpdater instance
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db *gorm.DB) *MsgProofUpdater {
return &MsgProofUpdater{
ctx: ctx,
db: db,
l2SentMsgOrm: orm.NewL2SentMsg(db),
rollupOrm: orm.NewRollupBatch(db),
withdrawTrie: NewWithdrawTrie(),
}
}
// Start the MsgProofUpdater
func (m *MsgProofUpdater) Start() {
log.Info("MsgProofUpdater Start")
m.initialize(m.ctx)
go func() {
tick := time.NewTicker(10 * time.Second)
for {
select {
case <-m.ctx.Done():
tick.Stop()
return
case <-tick.C:
latestBatch, err := m.rollupOrm.GetLatestRollupBatch(m.ctx)
if err != nil {
log.Warn("MsgProofUpdater: Can not get latest RollupBatch: ", "err", err)
continue
}
if latestBatch == nil {
continue
}
latestBatchIndexWithProof, err := m.l2SentMsgOrm.GetLatestL2SentMsgBatchIndex(m.ctx)
if err != nil {
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
continue
}
var start uint64
if latestBatchIndexWithProof < 0 {
start = 1
} else {
start = uint64(latestBatchIndexWithProof) + 1
}
for i := start; i <= latestBatch.BatchIndex; i++ {
batch, err := m.rollupOrm.GetRollupBatchByIndex(m.ctx, i)
if err != nil {
log.Error("MsgProofUpdater: Can not get RollupBatch: ", "err", err, "index", i)
break
}
// get all l2 messages in this batch
msgs, proofs, err := m.appendL2Messages(batch.StartBlockNumber, batch.EndBlockNumber)
if err != nil {
log.Error("MsgProofUpdater: can not append l2messages", "startBlockNumber", batch.StartBlockNumber, "endBlockNumber", batch.EndBlockNumber, "err", err)
break
}
err = m.updateMsgProof(msgs, proofs, batch.BatchIndex)
if err != nil {
log.Error("MsgProofUpdater: can not update msg proof", "err", err)
break
}
}
}
}
}()
}
// Stop the MsgProofUpdater
func (m *MsgProofUpdater) Stop() {
log.Info("MsgProofUpdater Stop")
}
func (m *MsgProofUpdater) initialize(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
err := m.initializeWithdrawTrie()
if err != nil {
log.Error("can not initialize withdraw trie", "err", err)
// give it some time to retry
time.Sleep(10 * time.Second)
continue
}
return
}
}
}
func (m *MsgProofUpdater) initializeWithdrawTrie() error {
var batch *orm.RollupBatch
firstMsg, err := m.l2SentMsgOrm.GetL2SentMessageByNonce(m.ctx, 0)
if err != nil {
return fmt.Errorf("failed to get first l2 message: %v", err)
}
// no l2 message yet
// TODO: check whether we really have no l2 sent message with nonce 0
if firstMsg == nil {
log.Info("No first l2sentmsg in db")
return nil
}
// if no batch, return and wait for next try round
batch, err = m.rollupOrm.GetLatestRollupBatch(m.ctx)
if err != nil {
return fmt.Errorf("failed to get latest batch: %v", err)
}
if batch == nil {
return fmt.Errorf("no batch found")
}
var batches []*orm.RollupBatch
batchIndex := batch.BatchIndex
for {
var msg *orm.L2SentMsg
msg, err = m.l2SentMsgOrm.GetLatestL2SentMsgLEHeight(m.ctx, batch.EndBlockNumber)
if err != nil {
log.Warn("failed to get l2 sent message less than height", "endBlocknum", batch.EndBlockNumber, "err", err)
}
if msg != nil && msg.MsgProof != "" {
log.Info("Found latest l2 sent msg with proof: ", "msg_proof", msg.MsgProof, "height", msg.Height, "msg_hash", msg.MsgHash)
// initialize withdrawTrie
proofBytes := common.Hex2Bytes(msg.MsgProof)
m.withdrawTrie.Initialize(msg.Nonce, common.HexToHash(msg.MsgHash), proofBytes)
break
}
// append unprocessed batch
batches = append(batches, batch)
if batchIndex == 1 {
// stop at batch index 1, otherwise the decrement below would wrap around
// batch index 0 is not stored in the DB
// TODO: check whether a batch with index 0 can exist in the future
break
}
// move on to the previous batch
batchIndex--
batch, err = m.rollupOrm.GetRollupBatchByIndex(m.ctx, batchIndex)
if err != nil || batch == nil {
return fmt.Errorf("failed to get block batch %v: %v", batchIndex, err)
}
}
log.Info("Build withdraw trie with pending messages")
for i := len(batches) - 1; i >= 0; i-- {
b := batches[i]
msgs, proofs, err := m.appendL2Messages(b.StartBlockNumber, b.EndBlockNumber)
if err != nil {
return err
}
err = m.updateMsgProof(msgs, proofs, b.BatchIndex)
if err != nil {
return err
}
}
log.Info("Build withdraw trie finished")
return nil
}
func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte, batchIndex uint64) error {
if len(msgs) == 0 {
return nil
}
// this should not happen, but double check
if len(msgs) != len(proofs) {
return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
}
err := m.db.Transaction(func(tx *gorm.DB) error {
for i, msg := range msgs {
proofHex := common.Bytes2Hex(proofs[i])
log.Debug("updateMsgProof", "msgHash", msg.MsgHash, "batchIndex", batchIndex, "proof", proofHex)
if err := m.l2SentMsgOrm.UpdateL2MessageProof(m.ctx, msg.MsgHash, proofHex, batchIndex, tx); err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
return nil
}
// appendL2Messages will append all messages between firstBlock and lastBlock (both inclusive) to withdrawTrie and compute corresponding merkle proof of each message.
func (m *MsgProofUpdater) appendL2Messages(firstBlock, lastBlock uint64) ([]*orm.L2SentMsg, [][]byte, error) {
var msgProofs [][]byte
messages, err := m.l2SentMsgOrm.GetL2SentMsgMsgHashByHeightRange(m.ctx, firstBlock, lastBlock)
if err != nil {
log.Error("GetL2SentMsgMsgHashByHeightRange failed", "error", err, "firstBlock", firstBlock, "lastBlock", lastBlock)
return messages, msgProofs, err
}
if len(messages) == 0 {
return messages, msgProofs, nil
}
// double-check that the nonce matches
if messages[0].Nonce != m.withdrawTrie.NextMessageNonce {
log.Error("L2 message nonce mismatch", "expected", m.withdrawTrie.NextMessageNonce, "found", messages[0].Nonce)
return messages, msgProofs, fmt.Errorf("l2 message nonce mismatch, expected: %v, found: %v", m.withdrawTrie.NextMessageNonce, messages[0].Nonce)
}
var hashes []common.Hash
for _, msg := range messages {
hashes = append(hashes, common.HexToHash(msg.MsgHash))
}
msgProofs = m.withdrawTrie.AppendMessages(hashes)
return messages, msgProofs, nil
}
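
A minimal usage sketch for the updater above. The confirmation depth (6) and start block (0) are placeholders, db is assumed to be an initialised *gorm.DB, and the import path bridge-history-api/crossmsg/messageproof is an assumption about where this package lives in the module.

package example

import (
	"context"

	"gorm.io/gorm"

	"bridge-history-api/crossmsg/messageproof" // assumed import path for this package
)

// startProofUpdater wires the MsgProofUpdater into a service.
func startProofUpdater(ctx context.Context, db *gorm.DB) *messageproof.MsgProofUpdater {
	updater := messageproof.NewMsgProofUpdater(ctx, 6, 0, db)
	updater.Start() // rebuilds the withdraw trie, then follows new batches every 10s
	return updater
}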

View File

@@ -1,189 +0,0 @@
package messageproof
import (
"github.com/ethereum/go-ethereum/common"
"bridge-history-api/utils"
)
// MaxHeight is the maximum possible height of the withdraw trie
const MaxHeight = 40
// WithdrawTrie is an append only merkle trie
type WithdrawTrie struct {
// used to rebuild the merkle tree
NextMessageNonce uint64
height int // current height of withdraw trie
branches []common.Hash
zeroes []common.Hash
}
// NewWithdrawTrie will return a new instance of WithdrawTrie
func NewWithdrawTrie() *WithdrawTrie {
zeroes := make([]common.Hash, MaxHeight)
branches := make([]common.Hash, MaxHeight)
zeroes[0] = common.Hash{}
for i := 1; i < MaxHeight; i++ {
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
}
return &WithdrawTrie{
zeroes: zeroes,
branches: branches,
height: -1,
NextMessageNonce: 0,
}
}
// Initialize will initialize the merkle trie with the rightmost leaf node
func (w *WithdrawTrie) Initialize(currentMessageNonce uint64, msgHash common.Hash, proofBytes []byte) {
proof := DecodeBytesToMerkleProof(proofBytes)
branches := RecoverBranchFromProof(proof, currentMessageNonce, msgHash)
w.height = len(proof)
w.branches = branches
w.NextMessageNonce = currentMessageNonce + 1
}
// AppendMessages appends a list of new messages as leaf nodes to the rightmost position of the tree and returns the proofs for all messages.
func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
length := len(hashes)
if length == 0 {
return make([][]byte, 0)
}
cache := make([]map[uint64]common.Hash, MaxHeight)
for h := 0; h < MaxHeight; h++ {
cache[h] = make(map[uint64]common.Hash)
}
// cache all branches that will be used later.
if w.NextMessageNonce != 0 {
index := w.NextMessageNonce
for h := 0; h <= w.height; h++ {
if index%2 == 1 {
// right child, `w.branches[h]` is the corresponding left child
// the index of left child should be `index ^ 1`.
cache[h][index^1] = w.branches[h]
}
index >>= 1
}
}
// cache all new leaves
for i := 0; i < length; i++ {
cache[0][w.NextMessageNonce+uint64(i)] = hashes[i]
}
// build withdraw trie with new hashes
minIndex := w.NextMessageNonce
maxIndex := w.NextMessageNonce + uint64(length) - 1
for h := 0; maxIndex > 0; h++ {
if minIndex%2 == 1 {
minIndex--
}
if maxIndex%2 == 0 {
cache[h][maxIndex^1] = w.zeroes[h]
}
for i := minIndex; i <= maxIndex; i += 2 {
cache[h+1][i>>1] = utils.Keccak2(cache[h][i], cache[h][i^1])
}
minIndex >>= 1
maxIndex >>= 1
}
// update branches using hashes one by one
for i := 0; i < length; i++ {
proof := UpdateBranchWithNewMessage(w.zeroes, w.branches, w.NextMessageNonce, hashes[i])
w.NextMessageNonce++
w.height = len(proof)
}
proofs := make([][]byte, length)
// retrieve merkle proof from cache
for i := 0; i < length; i++ {
index := w.NextMessageNonce + uint64(i) - uint64(length)
var merkleProof []common.Hash
for h := 0; h < w.height; h++ {
merkleProof = append(merkleProof, cache[h][index^1])
index >>= 1
}
proofs[i] = EncodeMerkleProofToBytes(merkleProof)
}
return proofs
}
// MessageRoot returns the current root hash of the withdraw trie.
func (w *WithdrawTrie) MessageRoot() common.Hash {
if w.height == -1 {
return common.Hash{}
}
return w.branches[w.height]
}
// DecodeBytesToMerkleProof converts a byte array into a bytes32 array. The caller should make sure the length is a multiple of 32.
func DecodeBytesToMerkleProof(proofBytes []byte) []common.Hash {
proof := make([]common.Hash, len(proofBytes)/32)
for i := 0; i < len(proofBytes); i += 32 {
proof[i/32] = common.BytesToHash(proofBytes[i : i+32])
}
return proof
}
// EncodeMerkleProofToBytes converts a bytes32 array into a byte array by concatenation.
func EncodeMerkleProofToBytes(proof []common.Hash) []byte {
var proofBytes []byte
for i := 0; i < len(proof); i++ {
proofBytes = append(proofBytes, proof[i][:]...)
}
return proofBytes
}
// UpdateBranchWithNewMessage updates the branches with the new message and returns the merkle proof for the message.
func UpdateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
root := msgHash
var merkleProof []common.Hash
var height uint64
for height = 0; index > 0; height++ {
if index%2 == 0 {
// it may be used in next round.
branches[height] = root
merkleProof = append(merkleProof, zeroes[height])
// it's a left child, the right child must be null
root = utils.Keccak2(root, zeroes[height])
} else {
// it's a right child, use previously computed hash
root = utils.Keccak2(branches[height], root)
merkleProof = append(merkleProof, branches[height])
}
index >>= 1
}
branches[height] = root
return merkleProof
}
// RecoverBranchFromProof will recover latest branches from merkle proof and message hash
func RecoverBranchFromProof(proof []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
branches := make([]common.Hash, 64)
root := msgHash
var height uint64
for height = 0; index > 0; height++ {
if index%2 == 0 {
branches[height] = root
// it's a left child, the right child must be null
root = utils.Keccak2(root, proof[height])
} else {
// it's a right child, use previously computed hash
branches[height] = proof[height]
root = utils.Keccak2(proof[height], root)
}
index >>= 1
}
branches[height] = root
for height++; height < 64; height++ {
branches[height] = common.Hash{}
}
return branches
}
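
A short sketch of the trie above: append two arbitrary message hashes, then re-derive the root of the first message from its returned proof, the same way verifyMerkleProof does in the test file below. The import path bridge-history-api/crossmsg/messageproof is an assumption about where this package lives.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"

	"bridge-history-api/crossmsg/messageproof" // assumed import path for this package
	"bridge-history-api/utils"
)

func main() {
	trie := messageproof.NewWithdrawTrie()

	hashes := []common.Hash{
		common.BigToHash(common.Big1),
		common.BigToHash(common.Big2),
	}
	proofs := trie.AppendMessages(hashes)

	// Re-derive the root of message 0 (nonce 0) from its merkle proof.
	root := hashes[0]
	index := uint64(0)
	for _, sibling := range messageproof.DecodeBytesToMerkleProof(proofs[0]) {
		if index%2 == 0 {
			root = utils.Keccak2(root, sibling)
		} else {
			root = utils.Keccak2(sibling, root)
		}
		index >>= 1
	}
	fmt.Println("proof reproduces the message root:", root == trie.MessageRoot())
}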

View File

@@ -1,212 +0,0 @@
package messageproof
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
"bridge-history-api/utils"
)
func TestUpdateBranchWithNewMessage(t *testing.T) {
zeroes := make([]common.Hash, 64)
branches := make([]common.Hash, 64)
zeroes[0] = common.Hash{}
for i := 1; i < 64; i++ {
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
}
UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
if branches[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") {
t.Fatalf("Invalid root, want %s, got %s", "0x0000000000000000000000000000000000000000000000000000000000000001", branches[0].Hex())
}
UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if branches[1] != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid root, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", branches[1].Hex())
}
UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
if branches[2] != common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c") {
t.Fatalf("Invalid root, want %s, got %s", "0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c", branches[2].Hex())
}
UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
if branches[2] != common.HexToHash("0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36") {
t.Fatalf("Invalid root, want %s, got %s", "0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36", branches[2].Hex())
}
}
func TestDecodeEncodeMerkleProof(t *testing.T) {
proof := DecodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
if len(proof) != 4 {
t.Fatalf("proof length mismatch, want %d, got %d", 4, len(proof))
}
if proof[0] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901") {
t.Fatalf("proof[0] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901", proof[0].Hex())
}
if proof[1] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902") {
t.Fatalf("proof[1] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902", proof[0].Hex())
}
if proof[2] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903") {
t.Fatalf("proof[2] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903", proof[0].Hex())
}
if proof[3] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904") {
t.Fatalf("proof[3] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904", proof[0].Hex())
}
bytes := EncodeMerkleProofToBytes(proof)
if common.Bytes2Hex(bytes) != "2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904" {
t.Fatalf("wrong encoded bytes")
}
}
func TestRecoverBranchFromProof(t *testing.T) {
zeroes := make([]common.Hash, 64)
branches := make([]common.Hash, 64)
zeroes[0] = common.Hash{}
for i := 1; i < 64; i++ {
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
}
proof := UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
tmpBranches := RecoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
tmpBranches = RecoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
tmpBranches = RecoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
tmpBranches = RecoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
}
func TestWithdrawTrieOneByOne(t *testing.T) {
for initial := 0; initial < 128; initial++ {
withdrawTrie := NewWithdrawTrie()
var hashes []common.Hash
for i := 0; i < initial; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
withdrawTrie.AppendMessages([]common.Hash{
hash,
})
}
for i := initial; i < 128; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
expectedRoot := computeMerkleRoot(hashes)
proofBytes := withdrawTrie.AppendMessages([]common.Hash{
hash,
})
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(i+1))
assert.Equal(t, expectedRoot.String(), withdrawTrie.MessageRoot().String())
proof := DecodeBytesToMerkleProof(proofBytes[0])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoot.String(), verifiedRoot.String())
}
}
}
func TestWithdrawTrieMultiple(t *testing.T) {
var expectedRoots []common.Hash
{
var hashes []common.Hash
for i := 0; i < 128; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
expectedRoots = append(expectedRoots, computeMerkleRoot(hashes))
}
}
for initial := 0; initial < 100; initial++ {
var hashes []common.Hash
for i := 0; i < initial; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
}
for finish := initial; finish < 100; finish++ {
withdrawTrie := NewWithdrawTrie()
withdrawTrie.AppendMessages(hashes)
var newHashes []common.Hash
for i := initial; i <= finish; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
newHashes = append(newHashes, hash)
}
proofBytes := withdrawTrie.AppendMessages(newHashes)
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(finish+1))
assert.Equal(t, expectedRoots[finish].String(), withdrawTrie.MessageRoot().String())
for i := initial; i <= finish; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
proof := DecodeBytesToMerkleProof(proofBytes[i-initial])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoots[finish].String(), verifiedRoot.String())
}
}
}
}
func verifyMerkleProof(index uint64, leaf common.Hash, proof []common.Hash) common.Hash {
root := leaf
for _, h := range proof {
if index%2 == 0 {
root = utils.Keccak2(root, h)
} else {
root = utils.Keccak2(h, root)
}
index >>= 1
}
return root
}
func computeMerkleRoot(hashes []common.Hash) common.Hash {
if len(hashes) == 0 {
return common.Hash{}
}
zeroHash := common.Hash{}
for {
if len(hashes) == 1 {
break
}
var newHashes []common.Hash
for i := 0; i < len(hashes); i += 2 {
if i+1 < len(hashes) {
newHashes = append(newHashes, utils.Keccak2(hashes[i], hashes[i+1]))
} else {
newHashes = append(newHashes, utils.Keccak2(hashes[i], zeroHash))
}
}
hashes = newHashes
zeroHash = utils.Keccak2(zeroHash, zeroHash)
}
return hashes[0]
}

View File

@@ -1,65 +0,0 @@
package crossmsg_test
import (
"crypto/rand"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"bridge-history-api/crossmsg"
)
func TestMergeIntoList(t *testing.T) {
headers, err := generateHeaders(64)
assert.NoError(t, err)
assert.Equal(t, headers[0].Hash(), headers[1].ParentHash)
headers2, err := generateHeaders(18)
assert.NoError(t, err)
result := crossmsg.MergeAddIntoHeaderList(headers, headers2, 64)
assert.Equal(t, 64, len(result))
assert.Equal(t, headers2[len(headers2)-1], result[len(result)-1])
assert.NotEqual(t, headers[0], result[0])
}
func generateHeaders(amount int) ([]*types.Header, error) {
headers := make([]*types.Header, amount)
for i := 0; i < amount; i++ {
var parentHash common.Hash
if i > 0 {
parentHash = headers[i-1].Hash()
}
nonce, err := rand.Int(rand.Reader, big.NewInt(1<<63-1))
if err != nil {
return nil, err
}
difficulty := big.NewInt(131072)
header := &types.Header{
ParentHash: parentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: common.Address{},
Root: common.Hash{},
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
Bloom: types.Bloom{},
Difficulty: difficulty,
Number: big.NewInt(int64(i)),
GasLimit: 5000000,
GasUsed: 0,
Time: uint64(i * 15),
Extra: []byte{},
MixDigest: common.Hash{},
Nonce: types.EncodeNonce(nonce.Uint64()),
}
headers[i] = header
}
return headers, nil
}
// TODO: add more test cases
// func TestReorg(t *testing.T)

View File

@@ -1,108 +0,0 @@
package crossmsg
import (
"context"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
"bridge-history-api/orm"
)
// ReorgHandling is the function type used to handle a reorg
type ReorgHandling func(ctx context.Context, reorgHeight uint64, db *gorm.DB) error
func reverseArray(arr []*types.Header) []*types.Header {
for i := 0; i < len(arr)/2; i++ {
j := len(arr) - i - 1
arr[i], arr[j] = arr[j], arr[i]
}
return arr
}
// IsParentAndChild checks whether the child header's ParentHash matches the parent header's Hash
func IsParentAndChild(parentHeader *types.Header, header *types.Header) bool {
return header.ParentHash == parentHeader.Hash()
}
// MergeAddIntoHeaderList merges two header lists; if the result exceeds the max length, the oldest entries are dropped
func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []*types.Header {
mergedArr := append(baseArr, extraArr...)
if len(mergedArr) <= maxLength {
return mergedArr
}
startIndex := len(mergedArr) - maxLength
return mergedArr[startIndex:]
}
// BackwardFindReorgBlock finds the reorg block by backward search
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, lastHeader *types.Header) (int, bool, []*types.Header) {
maxStep := len(headers)
backwardHeaderList := []*types.Header{lastHeader}
for iterRound := 0; iterRound < maxStep; iterRound++ {
header, err := client.HeaderByHash(ctx, lastHeader.ParentHash)
if err != nil {
log.Error("BackwardFindReorgBlock failed", "error", err)
return -1, false, nil
}
backwardHeaderList = append(backwardHeaderList, header)
for j := len(headers) - 1; j >= 0; j-- {
if IsParentAndChild(headers[j], header) {
backwardHeaderList = reverseArray(backwardHeaderList)
return j, true, backwardHeaderList
}
}
lastHeader = header
}
return -1, false, nil
}
// L1ReorgHandling handles l1 reorg
func L1ReorgHandling(ctx context.Context, reorgHeight uint64, db *gorm.DB) error {
l1CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
err := db.Transaction(func(tx *gorm.DB) error {
if err := l1CrossMsgOrm.DeleteL1CrossMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l1 cross msg from height", "height", reorgHeight, "err", err)
return err
}
if err := relayedOrm.DeleteL1RelayedHashAfterHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l1 relayed msg from height", "height", reorgHeight, "err", err)
return err
}
return nil
})
if err != nil {
log.Crit("l1 reorg handling failed", "err", err)
}
return err
}
// L2ReorgHandling handles l2 reorg
func L2ReorgHandling(ctx context.Context, reorgHeight uint64, db *gorm.DB) error {
l2CrossMsgOrm := orm.NewCrossMsg(db)
relayedOrm := orm.NewRelayedMsg(db)
l2SentMsgOrm := orm.NewL2SentMsg(db)
err := db.Transaction(func(tx *gorm.DB) error {
if err := l2CrossMsgOrm.DeleteL2CrossMsgFromHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l2 cross msg from height", "height", reorgHeight, "err", err)
return err
}
if err := relayedOrm.DeleteL2RelayedHashAfterHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l2 relayed msg from height", "height", reorgHeight, "err", err)
return err
}
if err := l2SentMsgOrm.DeleteL2SentMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
log.Error("delete l2 sent msg from height", "height", reorgHeight, "err", err)
return err
}
return nil
})
if err != nil {
log.Crit("l2 reorg handling failed", "err", err)
}
return err
}
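
A hedged sketch of the header-cache bookkeeping the fetcher relies on when using the helpers above: a new header is cached only if it extends the last cached header, and the cache is capped at the confirmation depth so old entries fall away.

package example

import (
	"github.com/ethereum/go-ethereum/core/types"

	"bridge-history-api/crossmsg"
)

// appendIfChild returns the updated cache and whether the header extended it;
// a false result means a reorg needs to be handled before caching continues.
func appendIfChild(cached []*types.Header, newHeader *types.Header, confirmation int) ([]*types.Header, bool) {
	if len(cached) > 0 && !crossmsg.IsParentAndChild(cached[len(cached)-1], newHeader) {
		return cached, false // parent hash mismatch: the chain we cached is no longer canonical
	}
	return crossmsg.MergeAddIntoHeaderList(cached, []*types.Header{newHeader}, confirmation), true
}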

View File

@@ -1,153 +0,0 @@
module bridge-history-api
go 1.19
require (
github.com/ethereum/go-ethereum v1.12.0
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
github.com/kataras/iris/v12 v12.2.0
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
github.com/pressly/goose/v3 v3.7.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
gorm.io/gorm v1.25.2
)
require (
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect
github.com/CloudyKit/jet/v6 v6.2.0 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
github.com/Joker/jade v1.1.3 // indirect
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.7.0 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.9.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.10.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v23.0.6+incompatible // indirect
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/flosch/pongo2/v4 v4.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gobwas/httphead v0.1.0 // indirect
github.com/gobwas/pool v0.2.1 // indirect
github.com/gobwas/ws v1.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/css v1.0.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/iris-contrib/go.uuid v2.0.0+incompatible // indirect
github.com/iris-contrib/schema v0.0.6 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/kataras/blocks v0.0.7 // indirect
github.com/kataras/golog v0.1.8 // indirect
github.com/kataras/neffos v0.0.21 // indirect
github.com/kataras/pio v0.0.11 // indirect
github.com/kataras/sitemap v0.0.6 // indirect
github.com/kataras/tunnel v0.0.4 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mailgun/raymond/v2 v2.0.48 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mediocregopher/radix/v3 v3.8.1 // indirect
github.com/microcosm-cc/bluemonday v1.0.25 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/nats-io/nats.go v1.23.0 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.27.1 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/schollz/closestmatch v2.1.0+incompatible // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tdewolff/minify/v2 v2.12.4 // indirect
github.com/tdewolff/parse/v2 v2.6.4 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yosssi/ace v0.0.5 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.11.0 // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

View File

@@ -1,732 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
github.com/CloudyKit/jet/v6 v6.2.0 h1:EpcZ6SR9n28BUGtNJSvlBqf90IpjeFr36Tizxhn/oME=
github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Joker/hpp v1.0.0 h1:65+iuJYdRXv/XyN62C1uEmmOx3432rNG/rKlX6V7Kkc=
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.1.3 h1:Qbeh12Vq6BxURXT1qZBRHsDxeURB8ztcL6f3EXSGeHk=
github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 h1:KkH3I3sJuOLP3TjA/dfr4NAY8bghDwnXiU7cTKxQqo0=
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk=
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM=
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.10.0 h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA=
github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.2.0 h1:UVuHOE+5tIWrim4zf/Xaa43+MIsDCPyW76QhUpiMGj4=
github.com/crate-crypto/go-kzg-4844 v0.2.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q=
github.com/ethereum/c-kzg-4844 v0.2.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8=
github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0=
github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0Hw=
github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/gobwas/ws v1.1.0 h1:7RFti/xnNkMJnrK7D1yQ/iCIB5OrrY/54/H930kIbHA=
github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k=
github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs=
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
github.com/iris-contrib/go.uuid v2.0.0+incompatible h1:XZubAYg61/JwnJNbZilGjf3b3pB80+OQg2qf6c8BfWE=
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/httpexpect/v2 v2.12.1 h1:3cTZSyBBen/kfjCtgNFoUKi1u0FVXNaAjyRJOo6AVS4=
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458 h1:V60rHQJc6DieKV1BqHIGclraPdO4kinuFAZIrPGHN7s=
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458/go.mod h1:7eVziAp1yUwFB/ZMg71n84VWQH+7wukvxcHuF2e7cbg=
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw=
github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/kataras/blocks v0.0.7 h1:cF3RDY/vxnSRezc7vLFlQFTYXG/yAr1o7WImJuZbzC4=
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
github.com/kataras/golog v0.1.8 h1:isP8th4PJH2SrbkciKnylaND9xoTtfxv++NB+DF0l9g=
github.com/kataras/golog v0.1.8/go.mod h1:rGPAin4hYROfk1qT9wZP6VY2rsb4zzc37QpdPjdkqVw=
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
github.com/kataras/iris/v12 v12.2.0 h1:WzDY5nGuW/LgVaFS5BtTkW3crdSKJ/FEgWnxPnIVVLI=
github.com/kataras/iris/v12 v12.2.0/go.mod h1:BLzBpEunc41GbE68OUaQlqX4jzi791mx5HU04uPb90Y=
github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
github.com/kataras/neffos v0.0.21 h1:UwN/F44jlqdtgFI29y3VhA7IlJ4JbK3UjCbTDg1pYoo=
github.com/kataras/neffos v0.0.21/go.mod h1:FeGka8lu8cjD2H+0OpBvW8c6xXawy3fj5VX6xcIJ1Fg=
github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
github.com/kataras/pio v0.0.11 h1:kqreJ5KOEXGMwHAWHDwIl+mjfNCPhAwZPa8gK7MKlyw=
github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI=
github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
github.com/kataras/sitemap v0.0.6 h1:w71CRMMKYMJh6LR2wTgnk5hSgjVNB9KL60n5e2KHvLY=
github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4=
github.com/kataras/tunnel v0.0.4 h1:sCAqWuJV7nPzGrlb0os3j49lk2JhILT0rID38NHNLpA=
github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M=
github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg=
github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/nats-io/jwt v0.3.0 h1:xdnzwFETV++jNc4W1mw//qFyJGb2ABOombmZJQS4+Qo=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
github.com/nats-io/nats-server/v2 v2.9.11 h1:4y5SwWvWI59V5mcqtuoqKq6L9NDUydOP3Ekwuwl8cZI=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nats.go v1.23.0 h1:lR28r7IX44WjYgdiKz9GmUeW0uh/m33uD3yEjLZ2cOE=
github.com/nats-io/nats.go v1.23.0/go.mod h1:ki/Scsa23edbh8IRZbCuNXR9TDcbvfaSijKtaqQgw+Q=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo=
github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk=
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tdewolff/minify/v2 v2.12.4 h1:kejsHQMM17n6/gwdw53qsi6lg0TGddZADVyQOz1KMdE=
github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk=
github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZycQ=
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/yosssi/ace v0.0.5 h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA=
github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

View File

@@ -1,13 +0,0 @@
package model
// QueryByAddressRequest the request parameter of address api
type QueryByAddressRequest struct {
Address string `url:"address"`
Offset int `url:"offset"`
Limit int `url:"limit"`
}
// QueryByHashRequest the request parameter of hash api
type QueryByHashRequest struct {
Txs []string `url:"txs"`
}
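
The url struct tags above follow the convention used by github.com/google/go-querystring, so a client can build the query string directly from these request structs. A minimal sketch under that assumption; the printed route is illustrative and not taken from this PR:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-querystring/query"
)

// QueryByAddressRequest mirrors the deleted model struct above.
type QueryByAddressRequest struct {
	Address string `url:"address"`
	Offset  int    `url:"offset"`
	Limit   int    `url:"limit"`
}

func main() {
	req := QueryByAddressRequest{Address: "0x0000000000000000000000000000000000000001", Offset: 0, Limit: 10}
	v, err := query.Values(req) // encode fields according to their url tags
	if err != nil {
		log.Fatal(err)
	}
	// prints something like: address=0x...&limit=10&offset=0
	fmt.Println(v.Encode())
}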

View File

@@ -1,21 +0,0 @@
package model
import "bridge-history-api/service"
// Data the return struct of apis
type Data struct {
Result []*service.TxHistoryInfo `json:"result"`
Total uint64 `json:"total"`
}
// QueryByAddressResponse the schema of address api response
type QueryByAddressResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
}
// QueryByHashResponse the schema of hash api response
type QueryByHashResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
}
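
On the consuming side, a hedged sketch of a client decoding these response schemas; the endpoint path and the bridge-history-api/model import path are assumptions inferred from the package name, not stated in this diff:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	"bridge-history-api/model" // assumed import path
)

func main() {
	// placeholder URL and route; adjust to the actual server address
	resp, err := http.Get("http://localhost:8080/api/txsbyhashes?txs=0xabc")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var out model.QueryByHashResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	if out.Data != nil {
		fmt.Println(out.Message, out.Data.Total)
	}
}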

View File

@@ -1,91 +0,0 @@
package orm
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
)
// RollupBatch is the struct for rollup_batch table
type RollupBatch struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
BatchHash string `json:"batch_hash" gorm:"column:batch_hash"`
CommitHeight uint64 `json:"commit_height" gorm:"column:commit_height"`
StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewRollupBatch creates a RollupBatch instance
func NewRollupBatch(db *gorm.DB) *RollupBatch {
return &RollupBatch{db: db}
}
// TableName returns the table name for the Batch model.
func (*RollupBatch) TableName() string {
return "rollup_batch"
}
// GetLatestRollupBatchProcessedHeight return latest processed height from rollup_batch table
func (r *RollupBatch) GetLatestRollupBatchProcessedHeight(ctx context.Context) (uint64, error) {
var result RollupBatch
err := r.db.WithContext(ctx).Unscoped().Select("commit_height").Order("id desc").First(&result).Error
if err != nil {
return 0, fmt.Errorf("RollupBatch.GetLatestRollupBatchProcessedHeight error: %w", err)
}
return result.CommitHeight, nil
}
// GetLatestRollupBatch return the latest rollup batch in db
func (r *RollupBatch) GetLatestRollupBatch(ctx context.Context) (*RollupBatch, error) {
var result RollupBatch
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_hash is not NULL").Order("batch_index desc").First(&result).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("RollupBatch.GetLatestRollupBatch error: %w", err)
}
return &result, nil
}
// GetRollupBatchByIndex return the rollup batch by index
func (r *RollupBatch) GetRollupBatchByIndex(ctx context.Context, index uint64) (*RollupBatch, error) {
var result RollupBatch
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index = ?", index).First(&result).Error
if err != nil {
return nil, fmt.Errorf("RollupBatch.GetRollupBatchByIndex error: %w", err)
}
return &result, nil
}
// InsertRollupBatch batch-inserts rollup batches into the db, optionally within the provided transaction
func (r *RollupBatch) InsertRollupBatch(ctx context.Context, batches []*RollupBatch, dbTx ...*gorm.DB) error {
if len(batches) == 0 {
return nil
}
db := r.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&RollupBatch{}).Create(&batches).Error
if err != nil {
batchIndexes := make([]uint64, 0, len(batches))
heights := make([]uint64, 0, len(batches))
for _, batch := range batches {
batchIndexes = append(batchIndexes, batch.BatchIndex)
heights = append(heights, batch.CommitHeight)
}
log.Error("failed to insert rollup batch", "batchIndexes", batchIndexes, "heights", heights)
return fmt.Errorf("RollupBatch.InsertRollupBatch error: %w", err)
}
return nil
}
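
A short usage sketch for this ORM layer, assuming a Postgres DSN and the bridge-history-api/orm import path (neither appears in this diff); it opens a gorm connection and reads the latest committed batch:

package main

import (
	"context"
	"fmt"
	"log"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"

	"bridge-history-api/orm" // assumed import path
)

func main() {
	dsn := "postgres://user:pass@localhost:5432/bridge?sslmode=disable" // placeholder DSN
	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}

	batchOrm := orm.NewRollupBatch(db)
	latest, err := batchOrm.GetLatestRollupBatch(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if latest == nil {
		fmt.Println("no committed batches yet")
		return
	}
	fmt.Println("latest batch:", latest.BatchIndex, latest.BatchHash)
}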

View File

@@ -1,370 +0,0 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
)
// AssetType can be ETH/ERC20/ERC1155/ERC721
type AssetType int
// MsgType can be layer1/layer2 msg
type MsgType int
func (a AssetType) String() string {
switch a {
case ETH:
return "ETH"
case ERC20:
return "ERC20"
case ERC1155:
return "ERC1155"
case ERC721:
return "ERC721"
}
return "Unknown Asset Type"
}
const (
// ETH = 0
ETH AssetType = iota
// ERC20 = 1
ERC20
// ERC721 = 2
ERC721
// ERC1155 = 3
ERC1155
)
const (
// UnknownMsg = 0
UnknownMsg MsgType = iota
// Layer1Msg = 1
Layer1Msg
// Layer2Msg = 2
Layer2Msg
)
// CrossMsg represents a cross message from layer 1 to layer 2
type CrossMsg struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id"`
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
Height uint64 `json:"height" gorm:"column:height"`
Sender string `json:"sender" gorm:"column:sender"`
Target string `json:"target" gorm:"column:target"`
Amount string `json:"amount" gorm:"column:amount"`
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:''"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:''"`
Layer1Token string `json:"layer1_token" gorm:"column:layer1_token;default:''"`
Layer2Token string `json:"layer2_token" gorm:"column:layer2_token;default:''"`
TokenIDs string `json:"token_ids" gorm:"column:token_ids;default:''"`
TokenAmounts string `json:"token_amounts" gorm:"column:token_amounts;default:''"`
Asset int `json:"asset" gorm:"column:asset"`
MsgType int `json:"msg_type" gorm:"column:msg_type"`
Timestamp *time.Time `json:"timestamp" gorm:"column:block_timestamp;default:NULL"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// TableName returns the table name for the CrossMsg model.
func (*CrossMsg) TableName() string {
return "cross_message"
}
// NewCrossMsg returns a new instance of CrossMsg.
func NewCrossMsg(db *gorm.DB) *CrossMsg {
return &CrossMsg{db: db}
}
// L1 Cross Msgs Operations
// GetL1CrossMsgByHash returns layer1 cross message by given hash
func (c *CrossMsg) GetL1CrossMsgByHash(ctx context.Context, l1Hash common.Hash) (*CrossMsg, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer1_hash = ? AND msg_type = ?", l1Hash.String(), Layer1Msg).First(&result).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("CrossMsg.GetL1CrossMsgByHash error: %w", err)
}
return &result, nil
}
// GetLatestL1ProcessedHeight returns the latest processed height of layer1 cross messages
func (c *CrossMsg) GetLatestL1ProcessedHeight(ctx context.Context) (uint64, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("msg_type = ?", Layer1Msg).
Select("height").
Order("id DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetLatestL1ProcessedHeight error: %w", err)
}
return result.Height, nil
}
// GetL1EarliestNoBlockTimestampHeight returns the earliest layer1 cross message height which has no block timestamp
func (c *CrossMsg) GetL1EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("block_timestamp IS NULL AND msg_type = ?", Layer1Msg).
Select("height").
Order("height ASC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetL1EarliestNoBlockTimestampHeight error: %w", err)
}
return result.Height, nil
}
// InsertL1CrossMsg batch insert layer1 cross messages into db
func (c *CrossMsg) InsertL1CrossMsg(ctx context.Context, messages []*CrossMsg, dbTx ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Model(&CrossMsg{}).Create(&messages).Error
if err != nil {
l1hashes := make([]string, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
l1hashes = append(l1hashes, msg.Layer1Hash)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l1 cross messages", "l1hashes", l1hashes, "heights", heights, "err", err)
return fmt.Errorf("CrossMsg.InsertL1CrossMsg error: %w", err)
}
return nil
}
// UpdateL1CrossMsgHash updates the l1 cross msg hash in db; no need to check msg_type since layer1_hash won't be empty if it's a layer1 msg
func (c *CrossMsg) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash, dbTx ...*gorm.DB) error {
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Model(&CrossMsg{}).Where("layer1_hash = ?", l1Hash.Hex()).Update("msg_hash", msgHash.Hex()).Error
if err != nil {
return fmt.Errorf("CrossMsg.UpdateL1CrossMsgHash error: %w", err)
}
return nil
}
// UpdateL1BlockTimestamp update layer1 block timestamp
func (c *CrossMsg) UpdateL1BlockTimestamp(ctx context.Context, height uint64, timestamp time.Time) error {
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("height = ? AND msg_type = ?", height, Layer1Msg).
Update("block_timestamp", timestamp).Error
if err != nil {
return fmt.Errorf("CrossMsg.UpdateL1BlockTimestamp error: %w", err)
}
return err
}
// DeleteL1CrossMsgAfterHeight soft delete layer1 cross messages after given height
func (c *CrossMsg) DeleteL1CrossMsgAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Delete(&CrossMsg{}, "height > ? AND msg_type = ?", height, Layer1Msg).Error
if err != nil {
return fmt.Errorf("CrossMsg.DeleteL1CrossMsgAfterHeight error: %w", err)
}
return nil
}
// L2 Cross Msgs Operations
// GetL2CrossMsgByHash returns layer2 cross message by given hash
func (c *CrossMsg) GetL2CrossMsgByHash(ctx context.Context, l2Hash common.Hash) (*CrossMsg, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer2_hash = ? AND msg_type = ?", l2Hash.String(), Layer1Msg).First(&result).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("CrossMsg.GetL2CrossMsgByHash error: %w", err)
}
return &result, nil
}
// GetLatestL2ProcessedHeight returns the latest processed height of layer2 cross messages
func (c *CrossMsg) GetLatestL2ProcessedHeight(ctx context.Context) (uint64, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Select("height").
Where("msg_type = ?", Layer2Msg).
Order("id DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetLatestL2ProcessedHeight error: %w", err)
}
return result.Height, nil
}
// GetL2CrossMsgByMsgHashList returns layer2 cross messages under given msg hashes
func (c *CrossMsg) GetL2CrossMsgByMsgHashList(ctx context.Context, msgHashList []string) ([]*CrossMsg, error) {
var results []*CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("msg_hash IN (?) AND msg_type = ?", msgHashList, Layer2Msg).
Find(&results).
Error
if err != nil {
return nil, fmt.Errorf("CrossMsg.GetL2CrossMsgByMsgHashList error: %w", err)
}
if len(results) == 0 {
log.Debug("no CrossMsg under given msg hashes", "msg hash list", msgHashList)
}
return results, nil
}
// GetL2EarliestNoBlockTimestampHeight returns the earliest layer2 cross message height which has no block timestamp
func (c *CrossMsg) GetL2EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("block_timestamp IS NULL AND msg_type = ?", Layer2Msg).
Select("height").
Order("height ASC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetL2EarliestNoBlockTimestampHeight error: %w", err)
}
return result.Height, nil
}
// InsertL2CrossMsg batch insert layer2 cross messages
func (c *CrossMsg) InsertL2CrossMsg(ctx context.Context, messages []*CrossMsg, dbTx ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Model(&CrossMsg{}).Create(&messages).Error
if err != nil {
l2hashes := make([]string, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
l2hashes = append(l2hashes, msg.Layer2Hash)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l2 cross messages", "l2hashes", l2hashes, "heights", heights, "err", err)
return fmt.Errorf("CrossMsg.InsertL2CrossMsg error: %w", err)
}
return nil
}
// UpdateL2CrossMsgHash update layer2 cross message hash
func (c *CrossMsg) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash, dbTx ...*gorm.DB) error {
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Model(&CrossMsg{}).
Where("layer2_hash = ?", l2Hash.String()).
Update("msg_hash", msgHash.String()).
Error
if err != nil {
return fmt.Errorf("CrossMsg.UpdateL2CrossMsgHash error: %w", err)
}
return nil
}
// UpdateL2BlockTimestamp update layer2 cross message block timestamp
func (c *CrossMsg) UpdateL2BlockTimestamp(ctx context.Context, height uint64, timestamp time.Time) error {
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("height = ? AND msg_type = ?", height, Layer2Msg).
Update("block_timestamp", timestamp).Error
if err != nil {
return fmt.Errorf("CrossMsg.UpdateL2BlockTimestamp error: %w", err)
}
return nil
}
// DeleteL2CrossMsgFromHeight delete layer2 cross messages from given height
func (c *CrossMsg) DeleteL2CrossMsgFromHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := c.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Where("height > ? AND msg_type = ?", height, Layer2Msg).Delete(&CrossMsg{}).Error
if err != nil {
return fmt.Errorf("CrossMsg.DeleteL2CrossMsgFromHeight error: %w", err)
}
return nil
}
// General Operations
// GetTotalCrossMsgCountByAddress get total cross msg count by address
func (c *CrossMsg) GetTotalCrossMsgCountByAddress(ctx context.Context, sender string) (uint64, error) {
var count int64
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("sender = ?", sender).
Count(&count).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("CrossMsg.GetTotalCrossMsgCountByAddress error: %w", err)
}
return uint64(count), nil
}
// GetCrossMsgsByAddressWithOffset get cross msgs by address with offset
func (c *CrossMsg) GetCrossMsgsByAddressWithOffset(ctx context.Context, sender string, offset int, limit int) ([]CrossMsg, error) {
var messages []CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
Where("sender = ?", sender).
Order("block_timestamp DESC NULLS FIRST, id DESC").
Limit(limit).
Offset(offset).
Find(&messages).
Error
if err != nil {
return nil, fmt.Errorf("CrossMsg.GetCrossMsgsByAddressWithOffset error: %w", err)
}
return messages, nil
}
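
The optional dbTx parameter on the write methods lets callers group several writes into one gorm transaction, which is the natural shape for reorg handling: soft-delete everything above the reorged height, then re-insert the re-parsed messages. A sketch under those assumptions (saveL1Reorg is an illustrative helper name, not from this PR):

package example

import (
	"context"

	"gorm.io/gorm"

	"bridge-history-api/orm" // assumed import path
)

// saveL1Reorg deletes and re-inserts L1 cross messages atomically.
func saveL1Reorg(ctx context.Context, db *gorm.DB, reorgHeight uint64, msgs []*orm.CrossMsg) error {
	crossMsgOrm := orm.NewCrossMsg(db)
	return db.Transaction(func(tx *gorm.DB) error {
		if err := crossMsgOrm.DeleteL1CrossMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
			return err
		}
		// both steps commit or roll back together
		return crossMsgOrm.InsertL1CrossMsg(ctx, msgs, tx)
	})
}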

View File

@@ -1,215 +0,0 @@
package orm
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
)
// L2SentMsg defines the struct for l2_sent_msg table record
type L2SentMsg struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id"`
OriginalSender string `json:"original_sender" gorm:"column:original_sender;default:''"`
TxHash string `json:"tx_hash" gorm:"column:tx_hash"`
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
Sender string `json:"sender" gorm:"column:sender"`
Target string `json:"target" gorm:"column:target"`
Value string `json:"value" gorm:"column:value"`
Height uint64 `json:"height" gorm:"column:height"`
Nonce uint64 `json:"nonce" gorm:"column:nonce"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index;default:0"`
MsgProof string `json:"msg_proof" gorm:"column:msg_proof;default:''"`
MsgData string `json:"msg_data" gorm:"column:msg_data;default:''"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL2SentMsg creates an L2SentMsg instance
func NewL2SentMsg(db *gorm.DB) *L2SentMsg {
return &L2SentMsg{db: db}
}
// TableName returns the table name for the L2SentMsg model.
func (*L2SentMsg) TableName() string {
return "l2_sent_msg"
}
// GetL2SentMsgByHash get l2 sent msg by hash
func (l *L2SentMsg) GetL2SentMsgByHash(ctx context.Context, msgHash string) (*L2SentMsg, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("msg_hash = ?", msgHash).
First(&result).
Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgByHash error: %w", err)
}
return &result, nil
}
// GetLatestSentMsgHeightOnL2 get latest sent msg height on l2
func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Select("height").
Order("nonce DESC").
First(&result).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("L2SentMsg.GetLatestSentMsgHeightOnL2 error: %w", err)
}
return result.Height, nil
}
// GetClaimableL2SentMsgByAddressWithOffset get claimable l2 sent msg by address with offset
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressWithOffset(ctx context.Context, address string, offset int, limit int) ([]*L2SentMsg, error) {
var results []*L2SentMsg
err := l.db.WithContext(ctx).Raw(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='' ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset).
Scan(&results).Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressWithOffset error: %w", err)
}
return results, nil
}
// GetClaimableL2SentMsgByAddressTotalNum get claimable l2 sent msg by address total num
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressTotalNum(ctx context.Context, address string) (uint64, error) {
var count uint64
err := l.db.WithContext(ctx).Raw(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='';`, address).
Scan(&count).Error
if err != nil {
return 0, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressTotalNum error: %w", err)
}
return count, nil
}
// GetLatestL2SentMsgBatchIndex get latest l2 sent msg batch index
func (l *L2SentMsg) GetLatestL2SentMsgBatchIndex(ctx context.Context) (int64, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("batch_index != 0").
Order("batch_index DESC").
Select("batch_index").
First(&result).
Error
if err != nil {
return -1, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgBatchIndex error: %w", err)
}
// Watch for overflow, though it's not likely to happen
return int64(result.BatchIndex), nil
}
// GetL2SentMsgMsgHashByHeightRange get l2 sent msg msg hash by height range
func (l *L2SentMsg) GetL2SentMsgMsgHashByHeightRange(ctx context.Context, startHeight, endHeight uint64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("height >= ? AND height <= ?", startHeight, endHeight).
Order("nonce ASC").
Find(&results).
Error
if err != nil {
return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgMsgHashByHeightRange error: %w", err)
}
return results, nil
}
// GetL2SentMessageByNonce get l2 sent message by nonce
func (l *L2SentMsg) GetL2SentMessageByNonce(ctx context.Context, nonce uint64) (*L2SentMsg, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("nonce = ?", nonce).
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("L2SentMsg.GetL2SentMessageByNonce error: %w", err)
}
return &result, nil
}
// GetLatestL2SentMsgLEHeight get latest l2 sent msg less than or equal to end block number
func (l *L2SentMsg) GetLatestL2SentMsgLEHeight(ctx context.Context, endBlockNumber uint64) (*L2SentMsg, error) {
var result L2SentMsg
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
Where("height <= ?", endBlockNumber).
Order("nonce DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgLEHeight error: %w", err)
}
return &result, nil
}
// InsertL2SentMsg batch insert l2 sent msg
func (l *L2SentMsg) InsertL2SentMsg(ctx context.Context, messages []*L2SentMsg, dbTx ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := l.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Model(&L2SentMsg{}).Create(&messages).Error
if err != nil {
l2hashes := make([]string, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
l2hashes = append(l2hashes, msg.TxHash)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l2 sent messages", "l2hashes", l2hashes, "heights", heights, "err", err)
return fmt.Errorf("L2SentMsg.InsertL2SentMsg error: %w", err)
}
return nil
}
// UpdateL2MessageProof update l2 message proof in db tx
func (l *L2SentMsg) UpdateL2MessageProof(ctx context.Context, msgHash string, proof string, batchIndex uint64, dbTx ...*gorm.DB) error {
db := l.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Model(&L2SentMsg{}).
Where("msg_hash = ?", msgHash).
Updates(map[string]interface{}{
"msg_proof": proof,
"batch_index": batchIndex,
}).Error
if err != nil {
return fmt.Errorf("L2SentMsg.UpdateL2MessageProof error: %w", err)
}
return nil
}
// DeleteL2SentMsgAfterHeight delete l2 sent msg after height
func (l *L2SentMsg) DeleteL2SentMsgAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := l.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
err := db.WithContext(ctx).Model(&L2SentMsg{}).Delete("height > ?", height).Error
if err != nil {
return fmt.Errorf("L2SentMsg.DeleteL2SentMsgAfterHeight error: %w", err)
}
return nil
}
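
UpdateL2MessageProof is what turns a stored withdrawal into a claimable one, since the claimable queries above filter on msg_proof != ''. A sketch of writing the proofs for one batch atomically; storeProofs and the proofs map are illustrative, not from this PR:

package example

import (
	"context"

	"gorm.io/gorm"

	"bridge-history-api/orm" // assumed import path
)

// storeProofs writes the withdraw proofs of a single batch in one transaction.
func storeProofs(ctx context.Context, db *gorm.DB, batchIndex uint64, proofs map[string]string) error {
	l2SentMsgOrm := orm.NewL2SentMsg(db)
	return db.Transaction(func(tx *gorm.DB) error {
		for msgHash, proof := range proofs {
			if err := l2SentMsgOrm.UpdateL2MessageProof(ctx, msgHash, proof, batchIndex, tx); err != nil {
				return err
			}
		}
		return nil
	})
}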

View File

@@ -1,61 +0,0 @@
package migrate
import (
"database/sql"
"embed"
"os"
"strconv"
"github.com/pressly/goose/v3"
)
//go:embed migrations/*.sql
var embedMigrations embed.FS
// MigrationsDir migration dir
const MigrationsDir string = "migrations"
func init() {
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("bridge_history_migrations")
verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
goose.SetVerbose(verbose)
}
// Migrate migrate db
func Migrate(db *sql.DB) error {
return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}
// Rollback rollback to the given version
func Rollback(db *sql.DB, version *int64) error {
if version != nil {
return goose.DownTo(db, MigrationsDir, *version)
}
return goose.Down(db, MigrationsDir)
}
// ResetDB clean and migrate db.
func ResetDB(db *sql.DB) error {
if err := Rollback(db, new(int64)); err != nil {
return err
}
return Migrate(db)
}
// Current get current version
func Current(db *sql.DB) (int64, error) {
return goose.GetDBVersion(db)
}
// Status is normal or not
func Status(db *sql.DB) error {
return goose.Version(db, MigrationsDir)
}
// Create a new migration folder
func Create(db *sql.DB, name, migrationType string) error {
return goose.Create(db, MigrationsDir, name, migrationType)
}
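
Since goose works against database/sql, a service that uses gorm elsewhere can unwrap the underlying *sql.DB and run these embedded migrations at startup. A sketch with the DSN and the migrate import path as assumptions (file paths are not shown in this compare view):

package main

import (
	"log"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"

	"bridge-history-api/db/migrate" // assumed import path
)

func main() {
	dsn := "postgres://user:pass@localhost:5432/bridge?sslmode=disable" // placeholder DSN
	gormDB, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}
	sqlDB, err := gormDB.DB() // unwrap the *sql.DB that goose expects
	if err != nil {
		log.Fatal(err)
	}
	if err := migrate.Migrate(sqlDB); err != nil {
		log.Fatal(err)
	}
	version, err := migrate.Current(sqlDB)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("bridge history schema at version", version)
}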

View File

@@ -1,57 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table cross_message
(
id BIGSERIAL PRIMARY KEY,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
amount VARCHAR NOT NULL,
layer1_hash VARCHAR NOT NULL DEFAULT '',
layer2_hash VARCHAR NOT NULL DEFAULT '',
layer1_token VARCHAR NOT NULL DEFAULT '',
layer2_token VARCHAR NOT NULL DEFAULT '',
asset SMALLINT NOT NULL,
msg_type SMALLINT NOT NULL,
token_ids TEXT NOT NULL DEFAULT '',
token_amounts TEXT NOT NULL DEFAULT '',
block_timestamp TIMESTAMP(0) DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
create unique index uk_msg_hash_msg_type
on cross_message (msg_hash, msg_type) where deleted_at IS NULL;
comment
on column cross_message.asset is 'ETH, ERC20, ERC721, ERC1155';
comment
on column cross_message.msg_type is 'unknown, l1msg, l2msg';
CREATE INDEX idx_l1_msg_index ON cross_message (layer1_hash, deleted_at);
CREATE INDEX idx_l2_msg_index ON cross_message (layer2_hash, deleted_at);
CREATE INDEX idx_height_msg_type_index ON cross_message (height, msg_type, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON cross_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists cross_message;
-- +goose StatementEnd

View File

@@ -1,41 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table relayed_msg
(
id BIGSERIAL PRIMARY KEY,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
layer1_hash VARCHAR NOT NULL DEFAULT '',
layer2_hash VARCHAR NOT NULL DEFAULT '',
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
create unique index uk_msg_hash_l1_hash_l2_hash
on relayed_msg (msg_hash, layer1_hash, layer2_hash) where deleted_at IS NULL;
CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);
CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);
CREATE INDEX idx_msg_hash_deleted_at_relayed_msg on relayed_msg (msg_hash, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists relayed_msg;
-- +goose StatementEnd

View File

@@ -1,47 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l2_sent_msg
(
id BIGSERIAL PRIMARY KEY,
original_sender VARCHAR NOT NULL DEFAULT '',
tx_hash VARCHAR NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
nonce BIGINT NOT NULL,
batch_index BIGINT NOT NULL DEFAULT 0,
msg_proof TEXT NOT NULL DEFAULT '',
msg_data TEXT NOT NULL DEFAULT '',
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
create unique index uk_msg_hash
on l2_sent_msg (msg_hash) where deleted_at IS NULL;
create unique index uk_nonce
on l2_sent_msg (nonce) where deleted_at IS NULL;
CREATE INDEX idx_msg_hash_deleted_at_l2_sent_msg on l2_sent_msg (msg_hash, deleted_at);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_sent_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l2_sent_msg;
-- +goose StatementEnd

View File

@@ -1,39 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table rollup_batch
(
id BIGSERIAL PRIMARY KEY,
batch_index BIGINT NOT NULL,
commit_height BIGINT NOT NULL,
start_block_number BIGINT NOT NULL,
end_block_number BIGINT NOT NULL,
batch_hash VARCHAR NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
create unique index uk_batch_index
on rollup_batch (batch_index) where deleted_at IS NULL;
create unique index uk_batch_hash
on rollup_batch (batch_hash) where deleted_at IS NULL;
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON rollup_batch FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists rollup_batch;
-- +goose StatementEnd

View File

@@ -1,142 +0,0 @@
package orm
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
)
// RelayedMsg is the struct for relayed_msg table
type RelayedMsg struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id"`
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
Height uint64 `json:"height" gorm:"column:height"`
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:''"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:''"`
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewRelayedMsg creates a RelayedMsg instance
func NewRelayedMsg(db *gorm.DB) *RelayedMsg {
return &RelayedMsg{db: db}
}
// TableName returns the table name for the RelayedMsg model.
func (*RelayedMsg) TableName() string {
return "relayed_msg"
}
// GetRelayedMsgByHash get relayed msg by hash
func (r *RelayedMsg) GetRelayedMsgByHash(ctx context.Context, msgHash string) (*RelayedMsg, error) {
var result RelayedMsg
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
Where("msg_hash = ?", msgHash).
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("RelayedMsg.GetRelayedMsgByHash error: %w", err)
}
return &result, nil
}
// GetLatestRelayedHeightOnL1 get latest relayed height on l1
func (r *RelayedMsg) GetLatestRelayedHeightOnL1(ctx context.Context) (uint64, error) {
var result RelayedMsg
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
Select("height").
Where("layer1_hash != ''").
Order("height DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("RelayedMsg.GetLatestRelayedHeightOnL1 error: %w", err)
}
return result.Height, err
}
// GetLatestRelayedHeightOnL2 get latest relayed height on l2
func (r *RelayedMsg) GetLatestRelayedHeightOnL2(ctx context.Context) (uint64, error) {
var result RelayedMsg
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
Select("height").
Where("layer2_hash != ''").
Order("height DESC").
First(&result).
Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("RelayedMsg.GetLatestRelayedHeightOnL2 error: %w", err)
}
return result.Height, nil
}
// InsertRelayedMsg batch-inserts relayed msgs into the db, optionally within the provided transaction
func (r *RelayedMsg) InsertRelayedMsg(ctx context.Context, messages []*RelayedMsg, dbTx ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := r.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Model(&RelayedMsg{}).Create(&messages).Error
if err != nil {
l2hashes := make([]string, 0, len(messages))
l1hashes := make([]string, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
l2hashes = append(l2hashes, msg.Layer2Hash)
l1hashes = append(l1hashes, msg.Layer1Hash)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l2 sent messages", "l2hashes", l2hashes, "l1hashes", l1hashes, "heights", heights, "err", err)
return fmt.Errorf("RelayedMsg.InsertRelayedMsg error: %w", err)
}
return nil
}
// DeleteL1RelayedHashAfterHeight delete l1 relayed hash after height
func (r *RelayedMsg) DeleteL1RelayedHashAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := r.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Where("height > ? AND layer1_hash != ''", height).
Delete(&RelayedMsg{}).Error
if err != nil {
return fmt.Errorf("RelayedMsg.DeleteL1RelayedHashAfterHeight error: %w", err)
}
return nil
}
// DeleteL2RelayedHashAfterHeight delete l2 relayed hash after heights
func (r *RelayedMsg) DeleteL2RelayedHashAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
db := r.db
if len(dbTx) > 0 && dbTx[0] != nil {
db = dbTx[0]
}
db = db.WithContext(ctx)
err := db.Where("height > ? AND layer2_hash != ''", height).
Delete(&RelayedMsg{}).Error
if err != nil {
return fmt.Errorf("RelayedMsg.DeleteL2RelayedHashAfterHeight error: %w", err)
}
return nil
}

View File

@@ -1,247 +0,0 @@
package service
import (
"context"
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
"bridge-history-api/orm"
)
// Finalized the schema of tx finalized infos
type Finalized struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
}
// UserClaimInfo the schema of tx claim infos
type UserClaimInfo struct {
From string `json:"from"`
To string `json:"to"`
Value string `json:"value"`
Nonce string `json:"nonce"`
BatchHash string `json:"batch_hash"`
Message string `json:"message"`
Proof string `json:"proof"`
BatchIndex string `json:"batch_index"`
}
// TxHistoryInfo the schema of tx history infos
type TxHistoryInfo struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
FinalizeTx *Finalized `json:"finalizeTx"`
ClaimInfo *UserClaimInfo `json:"claimInfo"`
CreatedAt *time.Time `json:"createdTime"`
}
// HistoryService defines the bridge tx history query service.
type HistoryService interface {
GetTxsByAddress(address common.Address, offset int, limit int) ([]*TxHistoryInfo, uint64, error)
GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
GetClaimableTxsByAddress(address common.Address, offset int, limit int) ([]*TxHistoryInfo, uint64, error)
}
// NewHistoryService returns a service backed with a "db"
func NewHistoryService(ctx context.Context, db *gorm.DB) HistoryService {
service := &historyBackend{ctx: ctx, db: db, prefix: "Scroll-Bridge-History-Server"}
return service
}
type historyBackend struct {
prefix string
ctx context.Context
db *gorm.DB
}
// GetCrossTxClaimInfo get UserClaimInfos by address
func GetCrossTxClaimInfo(ctx context.Context, msgHash string, db *gorm.DB) *UserClaimInfo {
l2SentMsgOrm := orm.NewL2SentMsg(db)
rollupOrm := orm.NewRollupBatch(db)
l2sentMsg, err := l2SentMsgOrm.GetL2SentMsgByHash(ctx, msgHash)
if err != nil || l2sentMsg == nil {
log.Debug("GetCrossTxClaimInfo failed", "error", err)
return &UserClaimInfo{}
}
batch, err := rollupOrm.GetRollupBatchByIndex(ctx, l2sentMsg.BatchIndex)
if err != nil {
log.Debug("GetCrossTxClaimInfo failed", "error", err)
return &UserClaimInfo{}
}
return &UserClaimInfo{
From: l2sentMsg.Sender,
To: l2sentMsg.Target,
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
}
}
func updateCrossTxHash(ctx context.Context, msgHash string, txInfo *TxHistoryInfo, db *gorm.DB) {
relayed := orm.NewRelayedMsg(db)
relayed, err := relayed.GetRelayedMsgByHash(ctx, msgHash)
if err != nil {
log.Debug("updateCrossTxHash failed", "error", err)
return
}
if relayed == nil {
return
}
if relayed.Layer1Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer1Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
if relayed.Layer2Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer2Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
}
// GetClaimableTxsByAddress get all claimable txs under given address
func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset int, limit int) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
l2SentMsgOrm := orm.NewL2SentMsg(h.db)
l2CrossMsgOrm := orm.NewCrossMsg(h.db)
total, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressTotalNum(h.ctx, address.Hex())
if err != nil || total == 0 {
return txHistories, 0, err
}
results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(h.ctx, address.Hex(), offset, limit)
if err != nil || len(results) == 0 {
return txHistories, 0, err
}
var msgHashList []string
for _, result := range results {
msgHashList = append(msgHashList, result.MsgHash)
}
crossMsgs, err := l2CrossMsgOrm.GetL2CrossMsgByMsgHashList(h.ctx, msgHashList)
// crossMsgs can be empty, because a message can be sent by calling the messenger contract directly (without a gateway event)
if err != nil {
return txHistories, 0, err
}
crossMsgMap := make(map[string]*orm.CrossMsg)
for _, crossMsg := range crossMsgs {
crossMsgMap[crossMsg.MsgHash] = crossMsg
}
for _, result := range results {
txInfo := &TxHistoryInfo{
Hash: result.TxHash,
IsL1: false,
BlockNumber: result.Height,
FinalizeTx: &Finalized{},
ClaimInfo: GetCrossTxClaimInfo(h.ctx, result.MsgHash, h.db),
}
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
txInfo.Amount = crossMsg.Amount
txInfo.To = crossMsg.Target
txInfo.BlockTimestamp = crossMsg.Timestamp
txInfo.CreatedAt = crossMsg.CreatedAt
}
txHistories = append(txHistories, txInfo)
}
return txHistories, total, err
}
// GetTxsByAddress get all txs under given address
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int, limit int) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
utilOrm := orm.NewCrossMsg(h.db)
total, err := utilOrm.GetTotalCrossMsgCountByAddress(h.ctx, address.String())
if err != nil || total == 0 {
return txHistories, 0, err
}
result, err := utilOrm.GetCrossMsgsByAddressWithOffset(h.ctx, address.String(), offset, limit)
if err != nil {
return nil, 0, err
}
for _, msg := range result {
txHistory := &TxHistoryInfo{
Hash: msg.Layer1Hash + msg.Layer2Hash,
Amount: msg.Amount,
To: msg.Target,
IsL1: msg.MsgType == int(orm.Layer1Msg),
BlockNumber: msg.Height,
BlockTimestamp: msg.Timestamp,
CreatedAt: msg.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
ClaimInfo: GetCrossTxClaimInfo(h.ctx, msg.MsgHash, h.db),
}
updateCrossTxHash(h.ctx, msg.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
}
return txHistories, total, nil
}
// GetTxsByHashes get tx infos under given tx hashes
func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {
txHistories := make([]*TxHistoryInfo, 0)
CrossMsgOrm := orm.NewCrossMsg(h.db)
for _, hash := range hashes {
l1result, err := CrossMsgOrm.GetL1CrossMsgByHash(h.ctx, common.HexToHash(hash))
if err != nil {
return nil, err
}
if l1result != nil {
txHistory := &TxHistoryInfo{
Hash: l1result.Layer1Hash,
Amount: l1result.Amount,
To: l1result.Target,
IsL1: true,
BlockNumber: l1result.Height,
BlockTimestamp: l1result.Timestamp,
CreatedAt: l1result.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
}
updateCrossTxHash(h.ctx, l1result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
l2result, err := CrossMsgOrm.GetL2CrossMsgByHash(h.ctx, common.HexToHash(hash))
if err != nil {
return nil, err
}
if l2result != nil {
txHistory := &TxHistoryInfo{
Hash: l2result.Layer2Hash,
Amount: l2result.Amount,
To: l2result.Target,
IsL1: false,
BlockNumber: l2result.Height,
BlockTimestamp: l2result.Timestamp,
CreatedAt: l2result.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
ClaimInfo: GetCrossTxClaimInfo(h.ctx, l2result.MsgHash, h.db),
}
updateCrossTxHash(h.ctx, l2result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
}
return txHistories, nil
}
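
A sketch of wiring HistoryService to the response schema from the model package with the standard net/http mux; the route, query parameter names, and the model import path are assumptions, since the HTTP layer itself is not part of this hunk:

package example

import (
	"context"
	"encoding/json"
	"net/http"
	"strconv"

	"github.com/ethereum/go-ethereum/common"
	"gorm.io/gorm"

	"bridge-history-api/model" // assumed import path
	"bridge-history-api/service"
)

// txsByAddressHandler serves GET /api/txs?address=...&offset=...&limit=...
func txsByAddressHandler(db *gorm.DB) http.HandlerFunc {
	svc := service.NewHistoryService(context.Background(), db)
	return func(w http.ResponseWriter, r *http.Request) {
		offset, _ := strconv.Atoi(r.URL.Query().Get("offset"))
		limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
		addr := common.HexToAddress(r.URL.Query().Get("address"))

		txs, total, err := svc.GetTxsByAddress(addr, offset, limit)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		resp := model.QueryByAddressResponse{
			Message: "ok",
			Data:    &model.Data{Result: txs, Total: total},
		}
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(&resp)
	}
}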

View File

@@ -1,69 +0,0 @@
package utils
import (
"github.com/urfave/cli/v2"
)
var (
// CommonFlags is used for app common flags in different modules
CommonFlags = []cli.Flag{
&ConfigFileFlag,
&VerbosityFlag,
&LogFileFlag,
&LogJSONFormat,
&LogDebugFlag,
&MetricsEnabled,
&MetricsAddr,
&MetricsPort,
}
// ConfigFileFlag load json type config file.
ConfigFileFlag = cli.StringFlag{
Name: "config",
Usage: "JSON configuration file",
Value: "./config.json",
}
// VerbosityFlag log level.
VerbosityFlag = cli.IntFlag{
Name: "verbosity",
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
Value: 3,
}
// LogFileFlag decides where the logger output is sent. If this flag is left
// empty, it will log to stdout.
LogFileFlag = cli.StringFlag{
Name: "log.file",
Usage: "Tells the module where to write log entries",
}
// LogJSONFormat decides the log format is json or not
LogJSONFormat = cli.BoolFlag{
Name: "log.json",
Usage: "Tells the module whether log format is json or not",
Value: true,
}
// LogDebugFlag make log messages with call-site location
LogDebugFlag = cli.BoolFlag{
Name: "log.debug",
Usage: "Prepends log messages with call-site location (file and line number)",
}
// MetricsEnabled enable metrics collection and reporting
MetricsEnabled = cli.BoolFlag{
Name: "metrics",
Usage: "Enable metrics collection and reporting",
Category: "METRICS",
Value: false,
}
// MetricsAddr is listening address of Metrics reporting server
MetricsAddr = cli.StringFlag{
Name: "metrics.addr",
Usage: "Metrics reporting server listening address",
Category: "METRICS",
Value: "127.0.0.1",
}
// MetricsPort is listening port of Metrics reporting server
MetricsPort = cli.IntFlag{
Name: "metrics.port",
Usage: "Metrics reporting server listening port",
Category: "METRICS",
Value: 6060,
}
)
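
These flag definitions are meant to be plugged into a urfave/cli/v2 app. A minimal sketch of that wiring, with the binary name as a placeholder and LogSetup (defined in the next file of this diff) used as the Before hook:

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"

	"bridge-history-api/utils" // assumed import path
)

func main() {
	app := &cli.App{
		Name:   "bridge-history-backend", // placeholder binary name
		Flags:  utils.CommonFlags,
		Before: utils.LogSetup, // configure logging before the action runs
		Action: func(ctx *cli.Context) error {
			fmt.Println("loading config from", ctx.String(utils.ConfigFileFlag.Name))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}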

View File

@@ -1,43 +0,0 @@
package utils
import (
"io"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
)
// LogSetup is for setup logger
func LogSetup(ctx *cli.Context) error {
var ostream log.Handler
if logFile := ctx.String(LogFileFlag.Name); len(logFile) > 0 {
fp, err := os.OpenFile(filepath.Clean(logFile), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
utils.Fatalf("Failed to open log file", "err", err)
}
if ctx.Bool(LogJSONFormat.Name) {
ostream = log.StreamHandler(io.Writer(fp), log.JSONFormat())
} else {
ostream = log.StreamHandler(io.Writer(fp), log.TerminalFormat(true))
}
} else {
output := io.Writer(os.Stderr)
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
ostream = log.StreamHandler(output, log.TerminalFormat(usecolor))
}
// show the call file and line number
log.PrintOrigins(ctx.Bool(LogDebugFlag.Name))
glogger := log.NewGlogHandler(ostream)
// Set log level
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
return nil
}

View File

@@ -1,412 +0,0 @@
package utils
import (
"context"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
backendabi "bridge-history-api/abi"
"bridge-history-api/orm"
)
// CachedParsedTxCalldata store parsed batch infos
type CachedParsedTxCalldata struct {
CallDataIndex uint64
BatchIndices []uint64
StartBlocks []uint64
EndBlocks []uint64
}
// ParseBackendL1EventLogs parses L1 watched events
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
// Uses the contract ABIs to parse the event logs.
// Can only be tested once our contracts are set up.
var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHash string
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
event := backendabi.DepositETH{}
err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
})
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
})
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
})
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
})
case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
// every deposit event is emitted after a SentMessage event, so this msg_hash can be used for the deposit events that follow
msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex()
case backendabi.L1BatchDepositERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "BatchDepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
})
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgType: int(orm.Layer1Msg),
MsgHash: msgHash,
})
case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer1Hash: vlog.TxHash.Hex(),
})
}
}
return l1CrossMsg, relayedMsgs, nil
}
// ParseBackendL2EventLogs parses L2 watched events
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
// Uses the contract ABIs to parse the event logs.
// Can only be tested once our contracts are set up.
var l2CrossMsg []*orm.CrossMsg
// relayedMsgs is used to confirm finalized L1 messages
var relayedMsgs []*orm.RelayedMsg
var l2SentMsgs []*orm.L2SentMsg
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
event := backendabi.DepositETH{}
err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(),
MsgType: int(orm.Layer2Msg),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgType: int(orm.Layer2Msg),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
MsgType: int(orm.Layer2Msg),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
Amount: event.Amount.String(),
MsgType: int(orm.Layer2Msg),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgType: int(orm.Layer2Msg),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgType: int(orm.Layer2Msg),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
// every withdraw event is emitted after a SentMessage event, so this msg_hash can be used for the withdraw events that follow
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
l2SentMsgs = append(l2SentMsgs,
&orm.L2SentMsg{
Sender: event.Sender.Hex(),
TxHash: vlog.TxHash.Hex(),
Target: event.Target.Hex(),
Value: event.Value.String(),
MsgHash: msgHash.Hex(),
Height: vlog.BlockNumber,
Nonce: event.MessageNonce.Uint64(),
MsgData: hexutil.Encode(event.Message),
})
case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer2Hash: vlog.TxHash.Hex(),
})
}
}
return l2CrossMsg, relayedMsgs, l2SentMsgs, nil
}
// ParseBatchInfoFromScrollChain parses ScrollChain events
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
var rollupBatches []*orm.RollupBatch
cache := make(map[string]CachedParsedTxCalldata)
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1CommitBatchEventSignature:
event := backendabi.L1CommitBatchEvent{}
err := UnpackLog(backendabi.ScrollChainABI, &event, "CommitBatch", vlog)
if err != nil {
log.Warn("Failed to unpack CommitBatch event", "err", err)
return rollupBatches, err
}
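// a single commit transaction can carry several batches and emit several CommitBatch events; the batch ranges parsed from its calldata are cached per tx hash, and CallDataIndex advances for each later event so the calldata is fetched and decoded only once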
if _, ok := cache[vlog.TxHash.Hex()]; ok {
c := cache[vlog.TxHash.Hex()]
c.CallDataIndex++
rollupBatches = append(rollupBatches, &orm.RollupBatch{
CommitHeight: vlog.BlockNumber,
BatchIndex: c.BatchIndices[c.CallDataIndex],
BatchHash: event.BatchHash.Hex(),
StartBlockNumber: c.StartBlocks[c.CallDataIndex],
EndBlockNumber: c.EndBlocks[c.CallDataIndex],
})
cache[vlog.TxHash.Hex()] = c
continue
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
return rollupBatches, err
}
indices, startBlocks, endBlocks, err := GetBatchRangeFromCalldataV1(commitTx.Data())
if err != nil {
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().Hex(), "height", vlog.BlockNumber)
return rollupBatches, err
}
cache[vlog.TxHash.Hex()] = CachedParsedTxCalldata{
CallDataIndex: 0,
BatchIndices: indices,
StartBlocks: startBlocks,
EndBlocks: endBlocks,
}
rollupBatches = append(rollupBatches, &orm.RollupBatch{
CommitHeight: vlog.BlockNumber,
BatchIndex: indices[0],
BatchHash: event.BatchHash.Hex(),
StartBlockNumber: startBlocks[0],
EndBlockNumber: endBlocks[0],
})
default:
continue
}
}
return rollupBatches, nil
}
func convertBigIntArrayToString(array []*big.Int) string {
stringArray := make([]string, len(array))
for i, num := range array {
stringArray[i] = num.String()
}
result := strings.Join(stringArray, ", ")
return result
}
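// For example, convertBigIntArrayToString([]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}) returns "1, 2, 3", the format stored in the TokenIDs and TokenAmounts columns above.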

View File

@@ -1,157 +0,0 @@
package utils
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
backendabi "bridge-history-api/abi"
)
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values
func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
}
// GetSafeBlockNumber gets the safe block number, i.e. the current block number minus the confirmation count
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
number, err := client.BlockNumber(ctx)
if err != nil || number <= confirmations {
return 0, err
}
number = number - confirmations
return number, nil
}
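// For example, with the chain at height 1000 and 6 confirmations the safe block number is 994; if the head is not above the confirmation count, 0 is returned.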
// UnpackLog unpacks a retrieved log into the provided output structure.
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
}
}
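// indexed event arguments live in log.Topics rather than log.Data, so they are collected and parsed separately below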
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// ComputeMessageHash computes the L2 cross-domain message hash as the keccak256 of the ABI-encoded relayMessage(sender, target, value, messageNonce, message) calldata
func ComputeMessageHash(
sender common.Address,
target common.Address,
value *big.Int,
messageNonce *big.Int,
message []byte,
) common.Hash {
data, _ := backendabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
return common.BytesToHash(crypto.Keccak256(data))
}
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
// GetBatchRangeFromCalldataV2 finds the batch index and block range from commitBatch calldata; the block range is inclusive on both ends.
func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error) {
method := backendabi.ScrollChainV2ABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return 0, 0, 0, err
}
args := commitBatchArgs{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return 0, 0, 0, err
}
var startBlock uint64
var finishBlock uint64
// decode batchIndex from ParentBatchHeader
if len(args.ParentBatchHeader) < 9 {
return 0, 0, 0, errors.New("invalid parent batch header")
}
batchIndex := binary.BigEndian.Uint64(args.ParentBatchHeader[1:9]) + 1
// decode block numbers from the chunks, assuming no chunk is empty
// | 1 byte | 60 bytes | ... | 60 bytes |
// | num blocks | block 1 | ... | block n |
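// e.g. a chunk whose first byte is 0x02 holds two 60-byte block contexts: the start block number is read big-endian from chunk[1:9] and the finish block number from chunk[1+60 : 1+60+8]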
if len(args.Chunks) == 0 {
return 0, 0, 0, errors.New("invalid chunks")
}
chunk := args.Chunks[0]
block := chunk[1:61] // first block in chunk
startBlock = binary.BigEndian.Uint64(block[0:8])
chunk = args.Chunks[len(args.Chunks)-1]
lastBlockIndex := int(chunk[0]) - 1
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
finishBlock = binary.BigEndian.Uint64(block[0:8])
return batchIndex, startBlock, finishBlock, err
}
// GetBatchRangeFromCalldataV1 finds the batch indices and block ranges from commitBatch/commitBatches calldata; block ranges are inclusive on both ends.
func GetBatchRangeFromCalldataV1(calldata []byte) ([]uint64, []uint64, []uint64, error) {
var batchIndices []uint64
var startBlocks []uint64
var finishBlocks []uint64
if bytes.Equal(calldata[0:4], common.Hex2Bytes("cb905499")) {
// commitBatches
method := backendabi.ScrollChainABI.Methods["commitBatches"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
args := make([]backendabi.IScrollChainBatch, len(values))
err = method.Inputs.Copy(&args, values)
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
for i := 0; i < len(args); i++ {
batchIndices = append(batchIndices, args[i].BatchIndex)
startBlocks = append(startBlocks, args[i].Blocks[0].BlockNumber)
finishBlocks = append(finishBlocks, args[i].Blocks[len(args[i].Blocks)-1].BlockNumber)
}
} else if bytes.Equal(calldata[0:4], common.Hex2Bytes("8c73235d")) {
// commitBatch
method := backendabi.ScrollChainABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
args := backendabi.IScrollChainBatch{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
batchIndices = append(batchIndices, args.BatchIndex)
startBlocks = append(startBlocks, args.Blocks[0].BlockNumber)
finishBlocks = append(finishBlocks, args.Blocks[len(args.Blocks)-1].BlockNumber)
} else {
return batchIndices, startBlocks, finishBlocks, errors.New("invalid selector")
}
return batchIndices, startBlocks, finishBlocks, nil
}

View File

@@ -1,64 +0,0 @@
package utils_test
import (
"os"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
"bridge-history-api/utils"
)
func TestKeccak2(t *testing.T) {
a := common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0")
b := common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c")
c := utils.Keccak2(a, b)
assert.NotEmpty(t, c)
assert.NotEqual(t, a, c)
assert.NotEqual(t, b, c)
assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
}
func TestGetBatchRangeFromCalldataV2(t *testing.T) {
// single chunk
batchIndex, start, finish, err := utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003d0100000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000100000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000"))
assert.NoError(t, err)
assert.Equal(t, start, uint64(1))
assert.Equal(t, finish, uint64(1))
assert.Equal(t, batchIndex, uint64(1))
// multiple chunk
batchIndex, start, finish, err = utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000007900000000000000000100000000000000010000000000000001038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000004c01000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000010000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b403000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000300000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00050000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c01000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa"))
assert.NoError(t, err)
assert.Equal(t, start, uint64(10))
assert.Equal(t, finish, uint64(20))
assert.Equal(t, batchIndex, uint64(2))
}
func TestGetBatchRangeFromCalldataV1(t *testing.T) {
calldata, err := os.ReadFile("../testdata/commit-batches-0x3095e91db7ba4a6fbf4654d607db322e58ff5579c502219c8024acaea74cf311.txt")
assert.NoError(t, err)
// multiple batches
batchIndices, startBlocks, finishBlocks, err := utils.GetBatchRangeFromCalldataV1(common.Hex2Bytes(string(calldata[:])))
assert.NoError(t, err)
assert.Equal(t, len(batchIndices), 5)
assert.Equal(t, len(startBlocks), 5)
assert.Equal(t, len(finishBlocks), 5)
assert.Equal(t, batchIndices[0], uint64(1))
assert.Equal(t, batchIndices[1], uint64(2))
assert.Equal(t, batchIndices[2], uint64(3))
assert.Equal(t, batchIndices[3], uint64(4))
assert.Equal(t, batchIndices[4], uint64(5))
assert.Equal(t, startBlocks[0], uint64(1))
assert.Equal(t, startBlocks[1], uint64(6))
assert.Equal(t, startBlocks[2], uint64(7))
assert.Equal(t, startBlocks[3], uint64(19))
assert.Equal(t, startBlocks[4], uint64(20))
assert.Equal(t, finishBlocks[0], uint64(5))
assert.Equal(t, finishBlocks[1], uint64(6))
assert.Equal(t, finishBlocks[2], uint64(18))
assert.Equal(t, finishBlocks[3], uint64(19))
assert.Equal(t, finishBlocks[4], uint64(20))
}

View File

@@ -5,8 +5,8 @@ IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
mock_abi:
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL2.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
bridge_bins: ## Builds the Bridge bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/

File diff suppressed because one or more lines are too long

View File

@@ -1,4 +1,4 @@
package bridgeabi
package bridgeabi_test
import (
"math/big"
@@ -6,100 +6,105 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
bridge_abi "scroll-tech/bridge/abi"
)
func TestEventSignature(t *testing.T) {
assert := assert.New(t)
assert.Equal(L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(bridge_abi.L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(bridge_abi.L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(bridge_abi.L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("9d3058a3cb9739a2527f22dd9a4138065844037d3004254952e2458d808cc364"))
assert.Equal(bridge_abi.L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
assert.Equal(bridge_abi.L1FinalizeBatchEventSignature, common.HexToHash("6be443154c959a7a1645b4392b6fa97d8e8ab6e8fd853d7085e8867083737d79"))
assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e"))
assert.Equal(bridge_abi.L1QueueTransactionEventSignature, common.HexToHash("bdcc7517f8fe3db6506dfd910942d0bbecaf3d6a506dadea65b0d988e75b9439"))
assert.Equal(L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(bridge_abi.L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(bridge_abi.L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(bridge_abi.L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))
assert.Equal(bridge_abi.L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))
assert.Equal(L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
assert.Equal(bridge_abi.L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
}
func TestPackRelayL2MessageWithProof(t *testing.T) {
assert := assert.New(t)
l1MessengerABI, err := L1ScrollMessengerMetaData.GetAbi()
l1MessengerABI, err := bridge_abi.L1ScrollMessengerMetaData.GetAbi()
assert.NoError(err)
proof := IL1ScrollMessengerL2MessageProof{
BatchIndex: big.NewInt(0),
MerkleProof: []byte{},
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BatchHash: common.Hash{},
MerkleProof: make([]byte, 0),
}
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), []byte{}, proof)
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
assert.NoError(err)
}
func TestPackCommitBatch(t *testing.T) {
assert := assert.New(t)
scrollChainABI, err := ScrollChainMetaData.GetAbi()
scrollChainABI, err := bridge_abi.ScrollChainMetaData.GetAbi()
assert.NoError(err)
version := uint8(1)
var parentBatchHeader []byte
var chunks [][]byte
var skippedL1MessageBitmap []byte
header := bridge_abi.IScrollChainBlockContext{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BlockNumber: 0,
Timestamp: 0,
BaseFee: big.NewInt(0),
GasLimit: 0,
NumTransactions: 0,
NumL1Messages: 0,
}
_, err = scrollChainABI.Pack("commitBatch", version, parentBatchHeader, chunks, skippedL1MessageBitmap)
batch := bridge_abi.IScrollChainBatch{
Blocks: []bridge_abi.IScrollChainBlockContext{header},
PrevStateRoot: common.Hash{},
NewStateRoot: common.Hash{},
WithdrawTrieRoot: common.Hash{},
BatchIndex: 0,
L2Transactions: make([]byte, 0),
}
_, err = scrollChainABI.Pack("commitBatch", batch)
assert.NoError(err)
}
func TestPackFinalizeBatchWithProof(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := ScrollChainMetaData.GetAbi()
l1RollupABI, err := bridge_abi.ScrollChainMetaData.GetAbi()
assert.NoError(err)
batchHeader := []byte{}
prevStateRoot := common.Hash{}
postStateRoot := common.Hash{}
withdrawRoot := common.Hash{}
aggrProof := []byte{}
proof := make([]*big.Int, 10)
instance := make([]*big.Int, 10)
for i := 0; i < 10; i++ {
proof[i] = big.NewInt(0)
instance[i] = big.NewInt(0)
}
_, err = l1RollupABI.Pack("finalizeBatchWithProof", batchHeader, prevStateRoot, postStateRoot, withdrawRoot, aggrProof)
assert.NoError(err)
}
func TestPackImportGenesisBatch(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(err)
batchHeader := []byte{}
stateRoot := common.Hash{}
_, err = l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
_, err = l1RollupABI.Pack("finalizeBatchWithProof", common.Hash{}, proof, instance)
assert.NoError(err)
}
func TestPackRelayL1Message(t *testing.T) {
assert := assert.New(t)
l2MessengerABI, err := L2ScrollMessengerMetaData.GetAbi()
l2MessengerABI, err := bridge_abi.L2ScrollMessengerMetaData.GetAbi()
assert.NoError(err)
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), []byte{})
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0))
assert.NoError(err)
}
func TestPackSetL1BaseFee(t *testing.T) {
assert := assert.New(t)
l1GasOracleABI, err := L1GasPriceOracleMetaData.GetAbi()
l1GasOracleABI, err := bridge_abi.L1GasPriceOracleMetaData.GetAbi()
assert.NoError(err)
baseFee := big.NewInt(2333)
@@ -110,7 +115,7 @@ func TestPackSetL1BaseFee(t *testing.T) {
func TestPackSetL2BaseFee(t *testing.T) {
assert := assert.New(t)
l2GasOracleABI, err := L2GasPriceOracleMetaData.GetAbi()
l2GasOracleABI, err := bridge_abi.L2GasPriceOracleMetaData.GetAbi()
assert.NoError(err)
baseFee := big.NewInt(2333)
@@ -121,8 +126,8 @@ func TestPackSetL2BaseFee(t *testing.T) {
func TestPackImportBlock(t *testing.T) {
assert := assert.New(t)
l1BlockContainerABI := L1BlockContainerABI
l1BlockContainerABI := bridge_abi.L1BlockContainerABI
_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, []byte{}, false)
_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), false)
assert.NoError(err)
}

View File

@@ -11,36 +11,43 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/config"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var app *cli.App
var (
app *cli.App
)
func init() {
// Set up event-watcher app info.
app = cli.NewApp()
app.Action = action
app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `event-watcher-test` app for integration-test.
utils.RegisterSimulation(app, utils.EventWatcherApp)
cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +55,15 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -72,17 +81,17 @@ func action(ctx *cli.Context) error {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
go utils.Loop(subCtx, 10*time.Second, func() {
go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})
// Start l2 watcher process
go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finish start all l2 functions
log.Info("Start event-watcher successfully")

View File

@@ -11,56 +11,64 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var app *cli.App
var (
app *cli.App
)
func init() {
// Set up gas-oracle app info.
app = cli.NewApp()
app.Action = action
app.Name = "gas-oracle"
app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle."
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `gas-oracle-test` app for integration-test.
utils.RegisterSimulation(app, utils.GasOracleApp)
cutils.RegisterSimulation(app, cutils.GasOracleApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
@@ -77,22 +85,21 @@ func action(ctx *cli.Context) error {
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
@@ -104,8 +111,8 @@ func action(ctx *cli.Context) error {
})
// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")

View File

@@ -11,7 +11,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/config"
)
// MockApp mockApp-test client manager.

View File

@@ -7,54 +7,64 @@ import (
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
cutils "scroll-tech/common/utils"
)
var app *cli.App
var (
app *cli.App
)
func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.MessageRelayerApp)
cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -62,14 +72,29 @@ func action(ctx *cli.Context) error {
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Start l2relayer process
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)
// Finish start all message relayer functions
log.Info("Start message-relayer successfully")

View File

@@ -11,53 +11,60 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var app *cli.App
var (
app *cli.App
)
func init() {
// Set up rollup-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `rollup-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.RollupRelayerApp)
cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -72,45 +79,37 @@ func action(ctx *cli.Context) error {
return err
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db)
if err != nil {
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, ormFactory)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(number)
l2watcher.TryFetchRunningMissingBlocks(ctx, number)
})
go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
// Batch proposer loop
go cutils.Loop(subCtx, 2*time.Second, func() {
batchProposer.TryProposeBatch()
batchProposer.TryCommitBatches()
})
go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")

View File

@@ -1,7 +1,7 @@
{
"l1_config": {
"confirmations": "0x6",
"endpoint": "DUMMY_ENDPOINT",
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"l1_message_queue_address": "0x0000000000000000000000000000000000000000",
"scroll_chain_address": "0x0000000000000000000000000000000000000000",
@@ -31,7 +31,7 @@
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1313131313131313131313131313131313131313131313131313131313131313"
"1212121212121212121212121212121212121212121212121212121212121212"
]
}
},
@@ -45,7 +45,7 @@
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "DUMMY_ENDPOINT",
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"check_pending_time": 10,
"check_balance_time": 100,
"escalate_blocks": 100,
@@ -66,26 +66,24 @@
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1313131313131313131313131313131313131313131313131313131313131313"
"1212121212121212121212121212121212121212121212121212121212121212"
],
"rollup_sender_private_keys": [
"1414141414141414141414141414141414141414141414141414141414141414"
"1212121212121212121212121212121212121212121212121212121212121212"
]
},
"chunk_proposer_config": {
"max_tx_gas_per_chunk": 1123456,
"max_l2_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300
},
"batch_proposer_config": {
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300
"proof_generation_freq": 1,
"batch_gas_threshold": 3000000,
"batch_tx_num_threshold": 44,
"batch_time_sec": 300,
"batch_commit_time_sec": 1200,
"batch_blocks_limit": 100,
"commit_tx_calldata_size_limit": 200000,
"public_input_config": {
"max_tx_num": 44,
"padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
}
},
"db_config": {

View File

@@ -5,14 +5,14 @@ import (
"os"
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/database"
)
// Config loads configuration items.
type Config struct {
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *database.Config `json:"db_config"`
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *database.DBConfig `json:"db_config"`
}
// NewConfig returns a new instance of Config.

View File

@@ -12,8 +12,8 @@ import (
func TestConfig(t *testing.T) {
t.Run("Success Case", func(t *testing.T) {
cfg, err := NewConfig("../../conf/config.json")
assert.NoError(t, err)
cfg, err := NewConfig("../config.json")
assert.NoError(t, err, "failed to load config")
assert.Len(t, cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys, 1)
assert.Len(t, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys, 1)
@@ -23,11 +23,7 @@ func TestConfig(t *testing.T) {
assert.NoError(t, err)
tmpJSON := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
defer func() {
if _, err = os.Stat(tmpJSON); err == nil {
assert.NoError(t, os.Remove(tmpJSON))
}
}()
defer func() { _ = os.Remove(tmpJSON) }()
assert.NoError(t, os.WriteFile(tmpJSON, data, 0644))
@@ -46,17 +42,14 @@ func TestConfig(t *testing.T) {
t.Run("Invalid JSON Content", func(t *testing.T) {
// Create a temporary file with invalid JSON content
tmpFile, err := os.CreateTemp("", "invalid_json_config.json")
tempFile, err := os.CreateTemp("", "invalid_json_config.json")
assert.NoError(t, err)
defer func() {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
defer os.Remove(tempFile.Name())
_, err = tmpFile.WriteString("{ invalid_json: ")
_, err = tempFile.WriteString("{ invalid_json: ")
assert.NoError(t, err)
_, err = NewConfig(tmpFile.Name())
_, err = NewConfig(tempFile.Name())
assert.Error(t, err)
})
}

View File

@@ -0,0 +1,49 @@
package config
import (
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/types"
)
// L2Config loads l2geth configuration items.
type L2Config struct {
// Confirmations is the number of block confirmations.
Confirmations rpc.BlockNumber `json:"confirmations"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address"`
// The L2MessageQueue contract address deployed on layer 2 chain.
L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
// The WithdrawTrieRootSlot in L2MessageQueue contract.
WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
// The batch_proposer config
BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
}
// BatchProposerConfig loads l2watcher batch_proposer configuration items.
type BatchProposerConfig struct {
// Proof generation frequency, generating proof every k blocks
ProofGenerationFreq uint64 `json:"proof_generation_freq"`
// Txnum threshold in a batch
BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"`
// Gas threshold in a batch
BatchGasThreshold uint64 `json:"batch_gas_threshold"`
// Time to wait before generating a batch even if the gas threshold is not met
BatchTimeSec uint64 `json:"batch_time_sec"`
// Time to wait before committing batches even if the calldata size has not reached CommitTxCalldataSizeLimit
BatchCommitTimeSec uint64 `json:"batch_commit_time_sec"`
// Max number of blocks in a batch
BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
// Commit tx calldata size limit in bytes, intended to cap the gas use of the commit tx at 2M gas
CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
// Commit tx calldata min size limit in bytes
CommitTxCalldataMinSize uint64 `json:"commit_tx_calldata_min_size,omitempty"`
// The public input hash config
PublicInputConfig *types.PublicInputHashConfig `json:"public_input_config"`
}
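// A matching "batch_proposer_config" JSON section, using the sample values from the config.json in this diff:
// {"proof_generation_freq": 1, "batch_gas_threshold": 3000000, "batch_tx_num_threshold": 44, "batch_time_sec": 300, "batch_commit_time_sec": 1200, "batch_blocks_limit": 100, "commit_tx_calldata_size_limit": 200000, "public_input_config": {"max_tx_num": 44, "padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"}}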

View File

@@ -1,60 +1,52 @@
module scroll-tech/bridge
go 1.19
go 1.18
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.3.0
gorm.io/gorm v1.25.2
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/sync v0.1.0
modernc.org/mathutil v1.4.1
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/ethereum/go-ethereum v1.11.4 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/iden3/go-iden3-crypto v0.0.14 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.3 // indirect
github.com/scroll-tech/zktrie v0.5.2 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/crypto v0.7.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.1.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

View File

@@ -13,16 +13,17 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/ethereum/go-ethereum v1.11.4 h1:KG81SnUHXWk8LJB3mBcHg/E2yLvXoiPmRMCIRxgx3cE=
github.com/ethereum/go-ethereum v1.11.4/go.mod h1:it7x0DWnTDMfVFdXcU6Ti4KEFQynLHVRarcSlPr0HBo=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
@@ -31,8 +32,7 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -43,38 +43,26 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.14 h1:HQnFchY735JRNQxof6n/Vbyon4owj4+Ku+LNAamWV6c=
github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -88,26 +76,26 @@ github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HD
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY=
github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE=
github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04 h1:PpI31kaBVm6+7sZtyK03Ex0QIg3P821Ktae0FHFh7IM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI=
github.com/scroll-tech/zktrie v0.5.2 h1:U34jPXMLGOlRHfdvYp5VVgOcC0RuPeJmcS3bWotCWiY=
github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -117,47 +105,46 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -168,7 +155,8 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHN
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=

View File

@@ -1,46 +0,0 @@
package config
import (
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/scroll-tech/go-ethereum/common"
)
// L2Config loads l2geth configuration items.
type L2Config struct {
// Confirmations block height confirmations number.
Confirmations rpc.BlockNumber `json:"confirmations"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address"`
// The L2MessageQueue contract address deployed on layer 2 chain.
L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
// The WithdrawTrieRootSlot in L2MessageQueue contract.
WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
// The chunk_proposer config
ChunkProposerConfig *ChunkProposerConfig `json:"chunk_proposer_config"`
// The batch_proposer config
BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
}
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
}
// BatchProposerConfig loads batch_proposer configuration items.
type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
}
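
For reference, a minimal sketch (not part of this changeset) of how a batch_proposer_config JSON block maps onto BatchProposerConfig through the struct tags above. The numeric values mirror the watcher test fixtures later in this diff and are illustrative, not recommended settings; the snippet also assumes it is compiled inside the bridge module, since the config package is internal.

```go
package main

import (
	"encoding/json"
	"fmt"

	"scroll-tech/bridge/internal/config"
)

func main() {
	// Example batch_proposer_config block; values mirror the watcher tests below.
	raw := []byte(`{
		"max_chunk_num_per_batch": 10,
		"max_l1_commit_gas_per_batch": 50000000000,
		"max_l1_commit_calldata_size_per_batch": 1000000,
		"min_chunk_num_per_batch": 1,
		"batch_timeout_sec": 300
	}`)

	var cfg config.BatchProposerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {MaxChunkNumPerBatch:10 MaxL1CommitGasPerBatch:50000000000 ...}
}
```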

View File

@@ -1,20 +0,0 @@
package relayer
import "errors"
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)
var (
// ErrExecutionRevertedMessageExpired error of Message expired
ErrExecutionRevertedMessageExpired = errors.New("execution reverted: Message expired")
// ErrExecutionRevertedAlreadySuccessExecuted error of Message was already successfully executed
ErrExecutionRevertedAlreadySuccessExecuted = errors.New("execution reverted: Message was already successfully executed")
)
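
The two gas-price constants define a relative threshold: defaultGasPriceDiff / gasPriceDiffPrecision = 50000 / 1000000 = 5%. Below is a standalone, illustrative sketch of the update rule that Layer2Relayer.ProcessGasPriceOracle applies later in this diff; the shouldUpdate helper is hypothetical, as the real code inlines this check.

```go
package main

import "fmt"

const (
	gasPriceDiffPrecision = 1_000_000
	defaultGasPriceDiff   = 50_000 // 5%
)

// shouldUpdate is a hypothetical helper mirroring the inline check in
// ProcessGasPriceOracle: send a new oracle tx when the suggested gas price
// has moved by at least gasPriceDiff (here 5%) from the last submitted value.
func shouldUpdate(lastGasPrice, suggested, minGasPrice uint64) bool {
	expectedDelta := lastGasPrice * defaultGasPriceDiff / gasPriceDiffPrecision
	return lastGasPrice == 0 ||
		(suggested >= minGasPrice &&
			(suggested >= lastGasPrice+expectedDelta || suggested <= lastGasPrice-expectedDelta))
}

func main() {
	last := uint64(2_000_000_000)                     // 2 gwei
	fmt.Println(shouldUpdate(last, 2_050_000_000, 0)) // false: 2.5% move, below the 5% threshold
	fmt.Println(shouldUpdate(last, 2_150_000_000, 0)) // true: 7.5% move
}
```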

View File

@@ -1,257 +0,0 @@
package relayer
import (
"context"
"errors"
"math/big"
"testing"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
)
var (
templateL1Message = []*orm.L1Message{
{
QueueIndex: 1,
MsgHash: "msg_hash1",
Height: 1,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
GasLimit: 11529940,
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer1Hash: "hash0",
},
{
QueueIndex: 2,
MsgHash: "msg_hash2",
Height: 2,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
GasLimit: 11529940,
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer1Hash: "hash1",
},
}
)
func setupL1RelayerDB(t *testing.T) *gorm.DB {
db, err := database.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
// testCreateNewL1Relayer tests creating a new L1 relayer instance.
func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
func testL1RelayerProcessSaveEvents(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
relayer.ProcessSavedEvents()
msg1, err := l1MessageOrm.GetL1MessageByQueueIndex(1)
assert.NoError(t, err)
assert.Equal(t, types.MsgStatus(msg1.Status), types.MsgSubmitted)
msg2, err := l1MessageOrm.GetL1MessageByQueueIndex(2)
assert.NoError(t, err)
assert.Equal(t, types.MsgStatus(msg2.Status), types.MsgSubmitted)
}
func testL1RelayerMsgConfirm(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Messages := []*orm.L1Message{
{MsgHash: "msg-1", QueueIndex: 0},
{MsgHash: "msg-2", QueueIndex: 1},
}
err := l1MessageOrm.SaveL1Messages(context.Background(), l1Messages)
assert.NoError(t, err)
// Create and set up the Layer1 Relayer.
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-1",
IsSuccessful: true,
})
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-2",
IsSuccessful: false,
})
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
msg1, err1 := l1MessageOrm.GetL1MessageByMsgHash("msg-1")
msg2, err2 := l1MessageOrm.GetL1MessageByMsgHash("msg-2")
return err1 == nil && types.MsgStatus(msg1.Status) == types.MsgConfirmed &&
err2 == nil && types.MsgStatus(msg2.Status) == types.MsgRelayFailed
})
assert.True(t, ok)
}
func testL1RelayerGasOracleConfirm(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
l1BlockOrm := orm.NewL1Block(db)
l1Block := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0, GasOracleStatus: int16(types.GasOraclePending)},
{Hash: "gas-oracle-2", Number: 1, GasOracleStatus: int16(types.GasOraclePending)},
}
// Insert test data.
assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
// Create and set up the Layer1 Relayer.
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate gas oracle confirmations.
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: "gas-oracle-1",
IsSuccessful: true,
})
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: "gas-oracle-2",
IsSuccessful: false,
})
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
})
assert.True(t, ok)
}
func testL1RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, l1Relayer)
var l1BlockOrm *orm.L1Block
convey.Convey("GetLatestL1BlockHeight failure", t, func() {
targetErr := errors.New("GetLatestL1BlockHeight error")
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
return 0, targetErr
})
defer patchGuard.Reset()
l1Relayer.ProcessGasPriceOracle()
})
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
return 100, nil
})
defer patchGuard.Reset()
convey.Convey("GetL1Blocks failure", t, func() {
targetErr := errors.New("GetL1Blocks error")
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
return nil, targetErr
})
l1Relayer.ProcessGasPriceOracle()
})
convey.Convey("Block not exist", t, func() {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
}
return tmpInfo, nil
})
l1Relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{
{
Hash: "gas-oracle-1",
Number: 0,
GasOracleStatus: int16(types.GasOraclePending),
},
}
return tmpInfo, nil
})
convey.Convey("setL1BaseFee failure", t, func() {
targetErr := errors.New("pack setL1BaseFee error")
patchGuard.ApplyMethodFunc(l1Relayer.l1GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
return nil, targetErr
})
l1Relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(l1Relayer.l1GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
return []byte("for test"), nil
})
convey.Convey("send transaction failure", t, func() {
targetErr := errors.New("send transaction failure")
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, *big.Int, []byte, uint64) (hash common.Hash, err error) {
return common.Hash{}, targetErr
})
l1Relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, *big.Int, []byte, uint64) (hash common.Hash, err error) {
return common.Hash{}, nil
})
convey.Convey("UpdateL1GasOracleStatusAndOracleTxHash failure", t, func() {
targetErr := errors.New("UpdateL1GasOracleStatusAndOracleTxHash failure")
patchGuard.ApplyMethodFunc(l1BlockOrm, "UpdateL1GasOracleStatusAndOracleTxHash", func(context.Context, string, types.GasOracleStatus, string) error {
return targetErr
})
l1Relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(l1BlockOrm, "UpdateL1GasOracleStatusAndOracleTxHash", func(context.Context, string, types.GasOracleStatus, string) error {
return nil
})
l1Relayer.ProcessGasPriceOracle()
}

View File

@@ -1,576 +0,0 @@
package relayer
import (
"context"
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
)
var (
bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
//
// Actions are triggered by a new head from the layer 1 geth node.
// @todo It would be better to trigger this from the watcher.
type Layer2Relayer struct {
ctx context.Context
l2Client *ethclient.Client
db *gorm.DB
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
cfg *config.RelayerConfig
messageSender *sender.Sender
l1MessengerABI *abi.ABI
rollupSender *sender.Sender
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
l2GasOracleABI *abi.ABI
minGasLimitForMessageRelay uint64
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// A list of processing batches commitment.
// key(string): confirmation ID, value(string): batch hash.
processingCommitment sync.Map
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
}
// NewLayer2Relayer will return a new instance of Layer2Relayer
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
log.Error("Failed to create messenger sender", "err", err)
return nil, err
}
rollupSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.RollupSenderPrivateKeys)
if err != nil {
log.Error("Failed to create rollup sender", "err", err)
return nil, err
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
if err != nil {
log.Error("Failed to create gas oracle sender", "err", err)
return nil, err
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
minGasPrice = cfg.GasOracleConfig.MinGasPrice
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
} else {
minGasPrice = 0
gasPriceDiff = defaultGasPriceDiff
}
minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
if cfg.MessageRelayMinGasLimit != 0 {
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
batchOrm: orm.NewBatch(db),
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
l2Client: l2Client,
messageSender: messageSender,
l1MessengerABI: bridgeAbi.L1ScrollMessengerABI,
rollupSender: rollupSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
gasOracleSender: gasOracleSender,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
}
// Initialize genesis before we do anything else
if initGenesis {
if err := layer2Relayer.initializeGenesis(); err != nil {
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
}
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
}
func (r *Layer2Relayer) initializeGenesis() error {
if count, err := r.batchOrm.GetBatchCount(r.ctx); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
log.Info("genesis already imported", "batch count", count)
return nil
}
genesis, err := r.l2Client.HeaderByNumber(r.ctx, big.NewInt(0))
if err != nil {
return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
}
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
chunk := &types.Chunk{
Blocks: []*types.WrappedBlock{{
Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
}},
}
err = r.db.Transaction(func(dbTX *gorm.DB) error {
var dbChunk *orm.Chunk
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
if err != nil {
return fmt.Errorf("failed to insert chunk: %v", err)
}
if err = r.chunkOrm.UpdateProvingStatus(r.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
}
var batch *orm.Batch
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
if err = r.chunkOrm.UpdateBatchHashInRange(r.ctx, 0, 0, batch.Hash, dbTX); err != nil {
return fmt.Errorf("failed to update batch hash for chunks: %v", err)
}
if err = r.batchOrm.UpdateProvingStatus(r.ctx, batch.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err)
}
if err = r.batchOrm.UpdateRollupStatus(r.ctx, batch.Hash, types.RollupFinalized, dbTX); err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
}
// commit genesis batch on L1
// note: we do this inside the DB transaction so that we can revert all DB changes if this step fails
return r.commitGenesisBatch(batch.Hash, batch.BatchHeader, common.HexToHash(batch.StateRoot))
})
if err != nil {
return fmt.Errorf("update genesis transaction failed: %v", err)
}
log.Info("successfully imported genesis chunk and batch")
return nil
}
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
// encode "importGenesisBatch" transaction calldata
calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if err != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
}
// submit genesis batch to L1 rollup contract
txHash, err := r.rollupSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
select {
// print progress
case <-ticker.C:
log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())
// timeout
case <-time.After(5 * time.Minute):
return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())
// handle confirmation
case confirmation := <-r.rollupSender.ConfirmChan():
if confirmation.ID != batchHash {
return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
}
if !confirmation.IsSuccessful {
return fmt.Errorf("import genesis batch tx failed")
}
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
return nil
}
}
}
// ProcessGasPriceOracle imports the L2 gas price to layer 1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if err != nil {
log.Error("Failed to GetLatestBatch", "err", err)
return
}
if types.GasOracleStatus(batch.OracleStatus) == types.GasOraclePending {
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
if err != nil {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
return
}
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// last gas price is undefined, or (suggestGasPriceUint64 >= minGasPrice && the diff threshold is exceeded)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
return
}
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
}
err = r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
}
r.lastGasPrice = suggestGasPriceUint64
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
}
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 10)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
for _, batch := range pendingBatches {
// get current header and parent header.
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
if err != nil {
log.Error("Failed to decode batch header", "index", batch.Index, "error", err)
return
}
parentBatch := &orm.Batch{}
if batch.Index > 0 {
parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
if err != nil {
log.Error("Failed to get parent batch header", "index", batch.Index-1, "error", err)
return
}
}
// get the chunks for the batch
startChunkIndex := batch.StartChunkIndex
endChunkIndex := batch.EndChunkIndex
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, startChunkIndex, endChunkIndex)
if err != nil {
log.Error("Failed to fetch chunks",
"start index", startChunkIndex,
"end index", endChunkIndex, "error", err)
return
}
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
var wrappedBlocks []*types.WrappedBlock
wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch wrapped blocks",
"start number", c.StartBlockNumber,
"end number", c.EndBlockNumber, "error", err)
return
}
chunk := &types.Chunk{
Blocks: wrappedBlocks,
}
var chunkBytes []byte
chunkBytes, err = chunk.Encode(c.TotalL1MessagesPoppedBefore)
if err != nil {
log.Error("Failed to encode chunk", "error", err)
return
}
encodedChunks[i] = chunkBytes
}
calldata, err := r.l1RollupABI.Pack("commitBatch", currentBatchHeader.Version(), parentBatch.BatchHeader, encodedChunks, currentBatchHeader.SkippedL1MessageBitmap())
if err != nil {
log.Error("Failed to pack commitBatch", "index", batch.Index, "error", err)
return
}
// send transaction
txID := batch.Hash + "-commit"
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatch tx to layer1 ", "err", err)
}
return
}
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batch.Hash, txHash.String(), types.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
return
}
bridgeL2BatchesCommittedTotalCounter.Inc(1)
r.processingCommitment.Store(txID, batch.Hash)
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
}
// ProcessCommittedBatches submits proofs to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
// retrieves the earliest batch whose rollup status is 'committed'
fields := map[string]interface{}{
"rollup_status": types.RollupCommitted,
}
orderByList := []string{"index ASC"}
limit := 1
batches, err := r.batchOrm.GetBatches(r.ctx, fields, orderByList, limit)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
}
if len(batches) != 1 {
log.Warn("Unexpected result for GetBlockBatches", "number of batches", len(batches))
return
}
batch := batches[0]
hash := batch.Hash
status := types.ProvingStatus(batch.ProvingStatus)
switch status {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this batch is not ready yet.
return
case types.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
success := false
var parentBatchStateRoot string
if batch.Index > 0 {
var parentBatch *orm.Batch
parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
// handle unexpected db error
if err != nil {
log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
return
}
parentBatchStateRoot = parentBatch.StateRoot
}
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}
}()
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
if err != nil {
log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
return
}
if err = aggProof.SanityCheck(); err != nil {
log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
return
}
data, err := r.l1RollupABI.Pack(
"finalizeBatchWithProof",
batch.BatchHeader,
common.HexToHash(parentBatchStateRoot),
common.HexToHash(batch.StateRoot),
common.HexToHash(batch.WithdrawRoot),
aggProof.Proof,
)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return
}
txID := hash + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("finalizeBatchWithProof in layer1 failed",
"index", batch.Index, "hash", batch.Hash, "err", err)
}
return
}
bridgeL2BatchesFinalizedTotalCounter.Inc(1)
log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)
// record and sync with db, @todo handle db error
err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
"index", batch.Index, "batch hash", batch.Hash,
"tx hash", finalizeTxHash.String(), "err", err)
}
success = true
r.processingFinalization.Store(txID, hash)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
}
}
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is CommitBatches transaction
if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchesCommitment"
var status types.RollupStatus
if confirmation.IsSuccessful {
status = types.RollupCommitted
} else {
status = types.RollupCommitFailed
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
}
// @todo handle db error
err := r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed",
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1)
r.processingCommitment.Delete(confirmation.ID)
}
// check whether it is proof finalization transaction
if batchHash, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
var status types.RollupStatus
if confirmation.IsSuccessful {
status = types.RollupFinalized
} else {
status = types.RollupFinalizeFailed
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
}
// @todo handle db error
err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageSender.ConfirmChan():
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}
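
A rough usage sketch, not taken from this changeset, of how a caller might construct Layer2Relayer and drive its processing methods on a timer. The config path, the 2-second polling interval, and the overall wiring are assumptions; the actual bridge binaries wire these loops up elsewhere.

```go
package main

import (
	"context"
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/controller/relayer"
	"scroll-tech/common/database"
)

// runL2Relayer is a hypothetical driver: it polls the relayer's three
// processing entry points until the context is cancelled.
func runL2Relayer(ctx context.Context, cfg *config.Config) error {
	l2Client, err := ethclient.Dial(cfg.L2Config.Endpoint)
	if err != nil {
		return err
	}
	db, err := database.InitDB(cfg.DBConfig)
	if err != nil {
		return err
	}
	defer database.CloseDB(db)

	r, err := relayer.NewLayer2Relayer(ctx, l2Client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
	if err != nil {
		return err
	}

	ticker := time.NewTicker(2 * time.Second) // assumed polling interval
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			r.ProcessGasPriceOracle()
			r.ProcessPendingBatches()
			r.ProcessCommittedBatches()
		}
	}
}

func main() {
	cfg, err := config.NewConfig("conf/config.json") // path is an assumption
	if err != nil {
		panic(err)
	}
	if err := runL2Relayer(context.Background(), cfg); err != nil {
		panic(err)
	}
}
```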

View File

@@ -1,296 +0,0 @@
package relayer
import (
"context"
"errors"
"math/big"
"testing"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
)
func setupL2RelayerDB(t *testing.T) *gorm.DB {
db, err := database.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
return db
}
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
func testL2RelayerProcessPendingBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
relayer.ProcessPendingBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
}
func testL2RelayerProcessCommittedBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizeFailed, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
}
func testL2RelayerRollupConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate message confirmations.
processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"}
isSuccessful := []bool{true, false, true, false}
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
batchHashes[i] = batch.Hash
}
for i, key := range processingKeys[:2] {
l2Relayer.processingCommitment.Store(key, batchHashes[i])
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i],
TxHash: common.HexToHash("0x123456789abcdef"),
})
}
for i, key := range processingKeys[2:] {
l2Relayer.processingFinalization.Store(key, batchHashes[i+2])
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i+2],
TxHash: common.HexToHash("0x123456789abcdef"),
})
}
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.RollupStatus{
types.RollupCommitted,
types.RollupCommitFailed,
types.RollupFinalized,
types.RollupFinalizeFailed,
}
for i, batchHash := range batchHashes {
batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0)
if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
return false
}
}
return true
})
assert.True(t, ok)
}
func testL2RelayerGasOracleConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
batchOrm := orm.NewBatch(db)
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
assert.NoError(t, err)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
assert.NoError(t, err)
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate gas oracle confirmations.
type BatchConfirmation struct {
batchHash string
isSuccessful bool
}
confirmations := []BatchConfirmation{
{batchHash: batch1.Hash, isSuccessful: true},
{batchHash: batch2.Hash, isSuccessful: false},
}
for _, confirmation := range confirmations {
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: confirmation.batchHash,
IsSuccessful: confirmation.isSuccessful,
})
}
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
for i, confirmation := range confirmations {
gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0)
if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
return false
}
}
return true
})
assert.True(t, ok)
}
func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err)
assert.NotNil(t, relayer)
var batchOrm *orm.Batch
convey.Convey("Failed to GetLatestBatch", t, func() {
targetErr := errors.New("GetLatestBatch error")
patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
return nil, targetErr
})
defer patchGuard.Reset()
relayer.ProcessGasPriceOracle()
})
patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
batch := orm.Batch{
OracleStatus: int16(types.GasOraclePending),
Hash: "0x0000000000000000000000000000000000000000",
}
return &batch, nil
})
defer patchGuard.Reset()
convey.Convey("Failed to fetch SuggestGasPrice from l2geth", t, func() {
targetErr := errors.New("SuggestGasPrice error")
patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
return nil, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
return big.NewInt(100), nil
})
convey.Convey("Failed to pack setL2BaseFee", t, func() {
targetErr := errors.New("setL2BaseFee error")
patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
return nil, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
return nil, nil
})
convey.Convey("Failed to send setL2BaseFee tx to layer2", t, func() {
targetErr := errors.New("failed to send setL2BaseFee tx to layer2 error")
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
return common.Hash{}, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
return common.HexToHash("0x56789abcdef1234"), nil
})
convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() {
targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error")
patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
return targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
return nil
})
relayer.ProcessGasPriceOracle()
}

View File

@@ -1,103 +0,0 @@
package relayer
import (
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
)
var (
// config
cfg *config.Config
base *docker.App
// l2geth client
l2Cli *ethclient.Client
// l2 block
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
// chunk
chunk1 *types.Chunk
chunk2 *types.Chunk
chunkHash1 common.Hash
chunkHash2 common.Hash
)
func setupEnv(t *testing.T) {
// Load config.
var err error
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)
base.RunImages(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
// Create l2geth client.
l2Cli, err = base.L2Client()
assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace1, wrappedBlock1)
assert.NoError(t, err)
chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err)
templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace2, wrappedBlock2)
assert.NoError(t, err)
chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err)
}
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestFunctions(t *testing.T) {
setupEnv(t)
// Run l1 relayer test cases.
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents)
t.Run("TestL1RelayerMsgConfirm", testL1RelayerMsgConfirm)
t.Run("TestL1RelayerGasOracleConfirm", testL1RelayerGasOracleConfirm)
t.Run("TestL1RelayerProcessGasPriceOracle", testL1RelayerProcessGasPriceOracle)
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
}

View File

@@ -1,171 +0,0 @@
package watcher
import (
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)
// BatchProposer proposes batches based on available unbatched chunks.
type BatchProposer struct {
ctx context.Context
db *gorm.DB
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
return &BatchProposer{
ctx: ctx,
db: db,
batchOrm: orm.NewBatch(db),
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
}
}
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
dbChunks, err := p.proposeBatchChunks()
if err != nil {
log.Error("proposeBatchChunks failed", "err", err)
return
}
if err := p.updateBatchInfoInDB(dbChunks); err != nil {
log.Error("update batch info in db failed", "err", err)
}
}
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
numChunks := len(dbChunks)
if numChunks <= 0 {
return nil
}
chunks, err := p.dbChunksToBridgeChunks(dbChunks)
if err != nil {
return err
}
startChunkIndex := dbChunks[0].Index
startChunkHash := dbChunks[0].Hash
endChunkIndex := dbChunks[numChunks-1].Index
endChunkHash := dbChunks[numChunks-1].Hash
err = p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
if dbErr != nil {
return dbErr
}
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
if dbErr != nil {
return dbErr
}
return nil
})
return err
}
func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx)
if err != nil {
return nil, err
}
if len(dbChunks) == 0 {
return nil, nil
}
firstChunk := dbChunks[0]
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
totalL1CommitGas := firstChunk.TotalL1CommitGas
var totalChunks uint64 = 1
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in the chunk proposer, and a manual fix is needed.
if totalL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
for i, chunk := range dbChunks[1:] {
totalChunks++
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalL1CommitGas > p.maxL1CommitGasPerBatch {
return dbChunks[:i+1], nil
}
}
var hasChunkTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"start block number", dbChunks[0].StartBlockNumber,
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
hasChunkTimeout = true
}
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
return nil, nil
}
return dbChunks, nil
}
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
chunks := make([]*types.Chunk, len(dbChunks))
for i, c := range dbChunks {
wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch wrapped blocks",
"start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return nil, err
}
chunks[i] = &types.Chunk{
Blocks: wrappedBlocks,
}
}
return chunks, nil
}
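
The prefix-selection rule in proposeBatchChunks above is easy to miss among the error paths; the standalone sketch below (hypothetical names, with the timeout and minimum-chunk checks omitted) shows just that rule: chunks are accumulated in order, and the batch is cut right before the first chunk that would push any running total over its limit.

```go
package main

import "fmt"

// chunkStats and selectBatchPrefix are hypothetical stand-ins for the ORM
// chunk fields and the inline loop in proposeBatchChunks.
type chunkStats struct {
	calldataSize uint32
	commitGas    uint64
}

func selectBatchPrefix(chunks []chunkStats, maxChunks uint64, maxGas uint64, maxCalldata uint32) []chunkStats {
	if len(chunks) == 0 {
		return nil
	}
	totalChunks := uint64(1)
	totalCalldata := chunks[0].calldataSize
	totalGas := chunks[0].commitGas
	for i, c := range chunks[1:] {
		totalChunks++
		totalCalldata += c.calldataSize
		totalGas += c.commitGas
		if totalChunks > maxChunks || totalCalldata > maxCalldata || totalGas > maxGas {
			// chunks[i+1] overflowed a limit, so keep only the chunks before it.
			return chunks[:i+1]
		}
	}
	return chunks
}

func main() {
	chunks := []chunkStats{{100, 1000}, {200, 2000}, {400, 4000}}
	// With a 250-byte calldata cap, the second chunk would push the total to 300,
	// so only the first chunk is batched.
	fmt.Println(len(selectBatchPrefix(chunks, 10, 1_000_000, 250))) // 1
}
```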

View File

@@ -1,71 +0,0 @@
package watcher
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)
// TODO: Add unit tests verifying that the limits are enforced correctly.
func testBatchProposer(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
ChunkTimeoutSec: 300,
}, db)
cp.TryProposeChunk()
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db)
bp.TryProposeBatch()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
assert.NoError(t, err)
assert.Empty(t, chunks)
batchOrm := orm.NewBatch(db)
// get all batches.
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, uint64(0), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)
assert.Equal(t, batches[0].Hash, dbChunks[0].BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(dbChunks[0].ProvingStatus))
blockOrm := orm.NewL2Block(db)
blocks, err := blockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, dbChunks[0].Hash, blocks[0].ChunkHash)
assert.Equal(t, dbChunks[0].Hash, blocks[1].ChunkHash)
}

View File

@@ -1,169 +0,0 @@
package watcher
import (
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)
// ChunkProposer proposes chunks based on available unchunked blocks.
type ChunkProposer struct {
ctx context.Context
db *gorm.DB
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxTxGasPerChunk uint64
maxL2TxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
chunkTimeoutSec uint64
}
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
}
}
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
proposedChunk, err := p.proposeChunk()
if err != nil {
log.Error("propose new chunk failed", "err", err)
return
}
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
log.Error("update chunk info in orm failed", "err", err)
}
}
func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
if chunk == nil {
return nil
}
err := p.db.Transaction(func(dbTX *gorm.DB) error {
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
if err != nil {
return err
}
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
log.Error("failed to update chunk_hash for l2_blocks", "chunk hash", chunk.Hash, "start block", 0, "end block", 0, "err", err)
return err
}
return nil
})
return err
}
func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
if err != nil {
return nil, err
}
if len(blocks) == 0 {
return nil, nil
}
firstBlock := blocks[0]
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
totalL1CommitGas := firstBlock.EstimateL1CommitGas()
// Check if the first block breaks hard limits.
// If so, it indicates a bug in the sequencer, and a manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
firstBlock.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
if totalL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
firstBlock.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
firstBlock.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
// Check if the first block breaks any soft limits.
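// Unlike the hard limits above, a soft-limit violation by the first block only logs a warning;
// the block is still included in the chunk, and the limit is re-checked as more blocks are appended below.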
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}
for i, block := range blocks[1:] {
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas += block.EstimateL1CommitGas()
if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalL1CommitGas > p.maxL1CommitGasPerChunk {
blocks = blocks[:i+1]
break
}
}
var hasBlockTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"block number", blocks[0].Header.Number,
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
hasBlockTimeout = true
}
if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
log.Warn("The calldata size of the chunk is less than the minimum limit",
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
)
return nil, nil
}
return &types.Chunk{Blocks: blocks}, nil
}
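
TryProposeChunk and TryProposeBatch are single-shot calls; in a running bridge process they would be invoked periodically. Below is a minimal sketch of one way to drive both proposers, assuming the cutils.Loop helper already used by the watcher tests in this diff; the startProposerLoop helper and the intervals are hypothetical and not part of this PR.

package watcher

import (
    "context"
    "time"

    cutils "scroll-tech/common/utils"
)

// startProposerLoop is a hypothetical wrapper showing how the proposers could be
// driven on fixed intervals; both methods are effectively no-ops when there is
// nothing to propose.
func startProposerLoop(ctx context.Context, cp *ChunkProposer, bp *BatchProposer) {
    go cutils.Loop(ctx, 2*time.Second, cp.TryProposeChunk)
    go cutils.Loop(ctx, 4*time.Second, bp.TryProposeBatch)
}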

View File

@@ -1,46 +0,0 @@
package watcher
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm"
)
// TODO: Add unit tests that verify the limits are enforced correctly.
func testChunkProposer(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
ChunkTimeoutSec: 300,
}, db)
cp.TryProposeChunk()
expectedChunk := &types.Chunk{
Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
}
expectedHash, err := expectedChunk.Hash(0)
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
assert.NoError(t, err)
assert.Len(t, chunks, 1)
assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
}
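
One possible shape for the limit tests flagged by the TODO above, shown only as a sketch. It reuses the fixtures and imports of the test file above and assumes that wrappedBlock1's estimated L1 commit calldata size exceeds 1 byte, so the hard limit trips and no chunk is persisted.

// Hypothetical test sketch (not part of this PR): a hard-limit violation by the
// first block should cause proposeChunk to fail, so no chunk row is created.
func testChunkProposerRespectsCalldataLimit(t *testing.T) {
    db := setupDB(t)
    defer database.CloseDB(db)

    err := orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
    assert.NoError(t, err)

    cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
        MaxTxGasPerChunk:                1000000000,
        MaxL2TxNumPerChunk:              10000,
        MaxL1CommitGasPerChunk:          50000000000,
        MaxL1CommitCalldataSizePerChunk: 1, // assumed to be smaller than the first block's calldata size
        MinL1CommitCalldataSizePerChunk: 0,
        ChunkTimeoutSec:                 300,
    }, db)
    cp.TryProposeChunk()

    chunks, err := orm.NewChunk(db).GetUnbatchedChunks(context.Background())
    assert.NoError(t, err)
    assert.Empty(t, chunks)
}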

View File

@@ -1,311 +0,0 @@
package watcher
import (
"context"
"fmt"
"math/big"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
)
// Metrics
var (
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
// L2WatcherClient provides APIs for subscribing to various events from l2geth
type L2WatcherClient struct {
ctx context.Context
event.Feed
*ethclient.Client
l2BlockOrm *orm.L2Block
l1MessageOrm *orm.L1Message
confirmations rpc.BlockNumber
messengerAddress common.Address
messengerABI *abi.ABI
messageQueueAddress common.Address
messageQueueABI *abi.ABI
withdrawTrieRootSlot common.Hash
// The height of the latest block for which the watcher has retrieved event logs
processedMsgHeight uint64
stopped uint64
}
// NewL2WatcherClient takes an l2geth client and creates a new L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
var savedHeight uint64
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
if err != nil || l1msg == nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
} else {
receipt, err := client.TransactionReceipt(ctx, common.HexToHash(l1msg.Layer2Hash))
if err != nil || receipt == nil {
log.Warn("get tx from l2 failed", "err", err)
savedHeight = 0
} else {
savedHeight = receipt.BlockNumber.Uint64()
}
}
w := L2WatcherClient{
ctx: ctx,
Client: client,
l2BlockOrm: orm.NewL2Block(db),
l1MessageOrm: orm.NewL1Message(db),
processedMsgHeight: savedHeight,
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridgeAbi.L2ScrollMessengerABI,
messageQueueAddress: messageQueueAddress,
messageQueueABI: bridgeAbi.L2MessageQueueABI,
withdrawTrieRootSlot: withdrawTrieRootSlot,
stopped: 0,
}
return &w
}
const blockTracesFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
if err != nil {
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
return
}
// Fetch and store block traces for missing blocks
for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1
if to > blockHeight {
to = blockHeight
}
if err = w.getAndStoreBlockTraces(w.ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
}
}
func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
txsData := make([]*gethTypes.TransactionData, len(txs))
for i, tx := range txs {
v, r, s := tx.RawSignatureValues()
nonce := tx.Nonce()
// We need QueueIndex in `NewBatchHeader`. However, `TransactionData`
// does not have this field. Since `L1MessageTx` does not have a nonce,
// we reuse this field for storing the queue index.
if msg := tx.AsL1MessageTx(); msg != nil {
nonce = msg.QueueIndex
}
txsData[i] = &gethTypes.TransactionData{
Type: tx.Type(),
TxHash: tx.Hash().String(),
Nonce: nonce,
ChainId: (*hexutil.Big)(tx.ChainId()),
Gas: tx.Gas(),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
To: tx.To(),
Value: (*hexutil.Big)(tx.Value()),
Data: hexutil.Encode(tx.Data()),
IsCreate: tx.To() == nil,
V: (*hexutil.Big)(v),
R: (*hexutil.Big)(r),
S: (*hexutil.Big)(s),
}
}
return txsData
}
func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*types.WrappedBlock
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
if err2 != nil {
return fmt.Errorf("failed to GetBlockByNumber: %v. number: %v", err2, number)
}
log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String())
withdrawTrieRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
if err3 != nil {
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &types.WrappedBlock{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
})
}
if len(blocks) > 0 {
if err := w.l2BlockOrm.InsertL2Blocks(w.ctx, blocks); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
}
return nil
}
// FetchContractEvent pulls the latest event logs from the configured contract addresses and saves them in the DB
func (w *L2WatcherClient) FetchContractEvent() {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
return
}
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
to = toBlock
}
// Note: block numbers are converted from uint64 to int64 here; this is safe for any realistic block height.
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.messageQueueAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 4)
query.Topics[0][0] = bridgeAbi.L2SentMessageEventSignature
query.Topics[0][1] = bridgeAbi.L2RelayedMessageEventSignature
query.Topics[0][2] = bridgeAbi.L2FailedRelayedMessageEventSignature
query.Topics[0][3] = bridgeAbi.L2AppendMessageEventSignature
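// Supplying multiple hashes in Topics[0] makes the filter match logs whose first
// topic equals any one of the four event signatures above (an OR filter).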
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("failed to get event logs", "err", err)
return
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
bridgeL2MsgsSyncHeightGauge.Update(to)
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}
relayedMessageCount := int64(len(relayedMessageEvents))
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
var msgStatus types.MsgStatus
if msg.isSuccessful {
msgStatus = types.MsgConfirmed
} else {
msgStatus = types.MsgFailed
}
if err = w.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
}
}
w.processedMsgHeight = uint64(to)
bridgeL2MsgsSyncHeightGauge.Update(to)
}
}
func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]relayedMessage, error) {
// We need to use the contract ABI to parse the event logs.
// This can only be fully tested once our contracts are deployed.
var relayedMessages []relayedMessage
for _, vLog := range logs {
switch vLog.Topics[0] {
case bridgeAbi.L2RelayedMessageEventSignature:
event := bridgeAbi.L2RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
return relayedMessages, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: true,
})
case bridgeAbi.L2FailedRelayedMessageEventSignature:
event := bridgeAbi.L2FailedRelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
return relayedMessages, err
}
relayedMessages = append(relayedMessages, relayedMessage{
msgHash: event.MessageHash,
txHash: vLog.TxHash,
isSuccessful: false,
})
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
}
return relayedMessages, nil
}

View File

@@ -1,194 +0,0 @@
package watcher
import (
"context"
"crypto/ecdsa"
"errors"
"math/big"
"strconv"
"testing"
"time"
"gorm.io/gorm"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
cutils "scroll-tech/common/utils"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/mock_bridge"
)
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
db := setupDB(t)
l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
return watcher, db
}
func testCreateNewWatcherAndStop(t *testing.T) {
wc, db := setupL2Watcher(t)
subCtx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
defer database.CloseDB(db)
}()
loopToFetchEvent(subCtx, wc)
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
assert.NoError(t, err)
// Create several transactions and commit to block
numTransactions := 3
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for i := 0; i < numTransactions; i++ {
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil, 0)
assert.NoError(t, err)
<-newSender.ConfirmChan()
}
blockNum, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}
func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t)
defer database.CloseDB(db)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, _, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
ok := cutils.TryTimes(10, func() bool {
latestHeight, err := l2Cli.BlockNumber(context.Background())
if err != nil {
return false
}
wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(latestHeight)
fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
return err == nil && fetchedHeight == latestHeight
})
assert.True(t, ok)
}
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(53077))
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
auth.GasPrice, err = l2Cli.SuggestGasPrice(context.Background())
assert.NoError(t, err)
auth.GasLimit = 500000
return auth
}
func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}
func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer database.CloseDB(db)
logs := []gethTypes.Log{
{
Topics: []common.Hash{bridgeAbi.L2RelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr
})
defer patchGuard.Reset()
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, relayedMessages)
})
convey.Convey("L2RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridgeAbi.L2RelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}
func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer database.CloseDB(db)
logs := []gethTypes.Log{
{
Topics: []common.Hash{bridgeAbi.L2FailedRelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}
convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr
})
defer patchGuard.Reset()
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, relayedMessages)
})
convey.Convey("L2FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridgeAbi.L2FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()
relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}

View File

@@ -1,411 +0,0 @@
package orm
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)
const defaultBatchHeaderVersion = 0
// Batch represents a batch of chunks.
type Batch struct {
db *gorm.DB `gorm:"column:-"`
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
CommitTxHash string `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
CommittedAt *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
// gas oracle
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewBatch creates a new Batch database instance.
func NewBatch(db *gorm.DB) *Batch {
return &Batch{db: db}
}
// TableName returns the table name for the Batch model.
func (*Batch) TableName() string {
return "batch"
}
// GetBatches retrieves selected batches from the database.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
for key, value := range fields {
db = db.Where(key, value)
}
for _, orderBy := range orderByList {
db = db.Order(orderBy)
}
if limit > 0 {
db = db.Limit(limit)
}
db = db.Order("index ASC")
var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatches error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
}
return batches, nil
}
// GetBatchCount retrieves the total number of batches in the database.
func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
var count int64
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Batch.GetBatchCount error: %w", err)
}
return uint64(count), nil
}
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)
var batch Batch
if err := db.Find(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
var proof message.AggProof
if err := json.Unmarshal(batch.Proof, &proof); err != nil {
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
return &proof, nil
}
// GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Order("index desc")
var latestBatch Batch
if err := db.First(&latestBatch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
}
return &latestBatch, nil
}
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 {
return nil, nil
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("hash, rollup_status")
db = db.Where("hash IN ?", hashes)
var batches []Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetRollupStatusByHashList error: %w, hashes: %v", err, hashes)
}
hashToStatusMap := make(map[string]types.RollupStatus)
for _, batch := range batches {
hashToStatusMap[batch.Hash] = types.RollupStatus(batch.RollupStatus)
}
var statuses []types.RollupStatus
for _, hash := range hashes {
status, ok := hashToStatusMap[hash]
if !ok {
return nil, fmt.Errorf("Batch.GetRollupStatusByHashList: hash not found in database: %s", hash)
}
statuses = append(statuses, status)
}
return statuses, nil
}
// GetPendingBatches retrieves pending batches up to the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetPendingBatches(ctx context.Context, limit int) ([]*Batch, error) {
if limit <= 0 {
return nil, errors.New("limit must be greater than zero")
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ?", types.RollupPending)
db = db.Order("index ASC")
db = db.Limit(limit)
var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetPendingBatches error: %w", err)
}
return batches, nil
}
// GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
}
return &batch, nil
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 {
return nil, errors.New("invalid args")
}
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get the latest batch", "err", err)
return nil, err
}
var batchIndex uint64
var parentBatchHash common.Hash
var totalL1MessagePoppedBefore uint64
var version uint8 = defaultBatchHeaderVersion
// If parentBatch == nil then err == gorm.ErrRecordNotFound, which means there is
// no batch record in the db yet; in that case we use default empty values for the new batch.
// If parentBatch != nil then err == nil, and we fill the parentBatch-related data into the new batch.
if parentBatch != nil {
batchIndex = parentBatch.Index + 1
parentBatchHash = common.HexToHash(parentBatch.Hash)
var parentBatchHeader *types.BatchHeader
parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
if err != nil {
log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
return nil, err
}
totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped()
version = parentBatchHeader.Version()
}
batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
if err != nil {
log.Error("failed to create batch header",
"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
"parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err)
return nil, err
}
numChunks := len(chunks)
lastChunkBlockNum := len(chunks[numChunks-1].Blocks)
newBatch := Batch{
Index: batchIndex,
Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
if err := db.Create(&newBatch).Error; err != nil {
log.Error("failed to insert batch", "batch", newBatch, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
return &newBatch, nil
}
// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{})
updateFields["oracle_status"] = int(status)
updateFields["oracle_tx_hash"] = txHash
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
}
return nil
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}
// UpdateRollupStatus updates the rollup status of a batch.
func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["rollup_status"] = int(status)
switch status {
case types.RollupCommitted:
updateFields["committed_at"] = time.Now()
case types.RollupFinalized:
updateFields["finalized_at"] = time.Now()
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateRollupStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}
// UpdateCommitTxHashAndRollupStatus updates the commit transaction hash and rollup status for a batch.
func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
updateFields := make(map[string]interface{})
updateFields["commit_tx_hash"] = commitTxHash
updateFields["rollup_status"] = int(status)
if status == types.RollupCommitted {
updateFields["committed_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), commitTxHash)
}
return nil
}
// UpdateFinalizeTxHashAndRollupStatus updates the finalize transaction hash and rollup status for a batch.
func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
updateFields := make(map[string]interface{})
updateFields["finalize_tx_hash"] = finalizeTxHash
updateFields["rollup_status"] = int(status)
if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash)
}
return nil
}
// UpdateProofByHash updates the batch proof by hash.
// for unit test.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
if err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
updateFields := make(map[string]interface{})
updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err = db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
return nil
}
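
A minimal usage sketch of how these methods compose, assuming a populated *gorm.DB and chunks that are already persisted; the insertAndCommitBatch helper is hypothetical and not part of this PR.

package orm

import (
    "context"

    "gorm.io/gorm"

    "scroll-tech/common/types"
)

// insertAndCommitBatch is a hypothetical sketch: create a batch from a chunk range,
// then record the L1 commit transaction once it has been sent.
func insertAndCommitBatch(ctx context.Context, db *gorm.DB, chunks []*types.Chunk,
    startIdx, endIdx uint64, startHash, endHash, commitTxHash string) error {
    batchOrm := NewBatch(db)

    // InsertBatch derives the batch index, header and hash from the latest batch in the database.
    batch, err := batchOrm.InsertBatch(ctx, startIdx, endIdx, startHash, endHash, chunks)
    if err != nil {
        return err
    }

    // Once the commit transaction is known, store its hash and mark the batch as committed.
    return batchOrm.UpdateCommitTxHashAndRollupStatus(ctx, batch.Hash, commitTxHash, types.RollupCommitted)
}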

View File

@@ -1,227 +0,0 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"scroll-tech/common/types"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)
// Chunk represents a chunk of blocks in the database.
type Chunk struct {
db *gorm.DB `gorm:"-"`
// chunk
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
StartBlockHash string `json:"start_block_hash" gorm:"column:start_block_hash"`
EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"`
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
// proof
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint32 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint32 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewChunk creates a new Chunk database instance.
func NewChunk(db *gorm.DB) *Chunk {
return &Chunk{db: db}
}
// TableName returns the table name for the chunk model.
func (*Chunk) TableName() string {
return "chunk"
}
// GetChunksInRange retrieves chunks within a given range (inclusive) from the database.
// The range is closed, i.e., it includes both start and end indices.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) {
if startIndex > endIndex {
return nil, fmt.Errorf("Chunk.GetChunksInRange: start index should be less than or equal to end index, start index: %v, end index: %v", startIndex, endIndex)
}
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
db = db.Order("index ASC")
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetChunksInRange error: %w, start index: %v, end index: %v", err, startIndex, endIndex)
}
// sanity check
if uint64(len(chunks)) != endIndex-startIndex+1 {
return nil, fmt.Errorf("Chunk.GetChunksInRange: incorrect number of chunks, expected: %v, got: %v, start index: %v, end index: %v", endIndex-startIndex+1, len(chunks), startIndex, endIndex)
}
return chunks, nil
}
// GetUnbatchedChunks retrieves unbatched chunks from the database.
func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash IS NULL")
db = db.Order("index asc")
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
}
return chunks, nil
}
// GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Order("index desc")
var latestChunk Chunk
if err := db.First(&latestChunk).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetLatestChunk error: %w", err)
}
return &latestChunk, nil
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
var chunkIndex uint64
var totalL1MessagePoppedBefore uint64
parentChunk, err := o.GetLatestChunk(ctx)
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get latest chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
// If parentChunk == nil then err == gorm.ErrRecordNotFound, which means there is
// no chunk record in the db yet; in that case we use default empty values for the new chunk.
// If parentChunk != nil then err == nil, and we fill the parentChunk-related data into the new chunk.
if parentChunk != nil {
chunkIndex = parentChunk.Index + 1
totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
}
hash, err := chunk.Hash(totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to get chunk hash", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
var totalL2TxGas uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas += block.EstimateL1CommitGas()
}
numBlocks := len(chunk.Blocks)
newChunk := Chunk{
Index: chunkIndex,
Hash: hash.Hex(),
StartBlockNumber: chunk.Blocks[0].Header.Number.Uint64(),
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: totalL2TxGas,
TotalL2TxNum: uint32(totalL2TxNum),
TotalL1CommitCalldataSize: uint32(totalL1CommitCalldataSize),
TotalL1CommitGas: totalL1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
ProvingStatus: int16(types.ProvingTaskUnassigned),
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(&newChunk).Error; err != nil {
return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash)
}
return &newChunk, nil
}
// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatus error: %w, chunk hash: %v, status: %v", err, hash, status.String())
}
return nil
}
// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
if err := db.Update("batch_hash", batchHash).Error; err != nil {
return fmt.Errorf("Chunk.UpdateBatchHashInRange error: %w, start index: %v, end index: %v, batch hash: %v", err, startIndex, endIndex, batchHash)
}
return nil
}
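
InsertChunk and UpdateBatchHashInRange accept an optional *gorm.DB so they can join an enclosing transaction; below is a minimal sketch of that pattern, with a hypothetical helper name.

package orm

import (
    "context"

    "gorm.io/gorm"

    "scroll-tech/common/types"
)

// insertChunkAndAttachToBatch is a hypothetical sketch: insert a chunk and attach it
// to a batch atomically by passing the transaction handle through the dbTX parameter.
func insertChunkAndAttachToBatch(ctx context.Context, db *gorm.DB, chunk *types.Chunk, batchHash string) error {
    chunkOrm := NewChunk(db)
    return db.Transaction(func(tx *gorm.DB) error {
        dbChunk, err := chunkOrm.InsertChunk(ctx, chunk, tx)
        if err != nil {
            return err
        }
        // Both bounds are inclusive; here the range covers just the newly inserted chunk.
        return chunkOrm.UpdateBatchHashInRange(ctx, dbChunk.Index, dbChunk.Index, batchHash, tx)
    })
}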

View File

@@ -1,103 +0,0 @@
package orm
import (
"context"
"fmt"
"time"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L1Block is the structure of a stored L1 block record
type L1Block struct {
db *gorm.DB `gorm:"column:-"`
// block
Number uint64 `json:"number" gorm:"column:number"`
Hash string `json:"hash" gorm:"column:hash"`
BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
// oracle
GasOracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL1Block creates an L1Block instance
func NewL1Block(db *gorm.DB) *L1Block {
return &L1Block{db: db}
}
// TableName returns the L1Block table name
func (*L1Block) TableName() string {
return "l1_block"
}
// GetLatestL1BlockHeight gets the latest L1 block height
func (o *L1Block) GetLatestL1BlockHeight(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
db = db.Select("COALESCE(MAX(number), 0)")
var maxNumber uint64
if err := db.Row().Scan(&maxNumber); err != nil {
return 0, fmt.Errorf("L1Block.GetLatestL1BlockHeight error: %w", err)
}
return maxNumber, nil
}
// GetL1Blocks gets the L1 blocks matching the given fields
func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}) ([]L1Block, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
for key, value := range fields {
db = db.Where(key, value)
}
db = db.Order("number ASC")
var l1Blocks []L1Block
if err := db.Find(&l1Blocks).Error; err != nil {
return nil, fmt.Errorf("L1Block.GetL1Blocks error: %w, fields: %v", err, fields)
}
return l1Blocks, nil
}
// InsertL1Blocks batch inserts L1 blocks
func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
if len(blocks) == 0 {
return nil
}
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
if err := db.Create(&blocks).Error; err != nil {
return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
}
return nil
}
// UpdateL1GasOracleStatusAndOracleTxHash updates the L1 gas oracle status and oracle tx hash
func (o *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
updateFields := map[string]interface{}{
"oracle_status": int(status),
"oracle_tx_hash": txHash,
}
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
db = db.Where("hash", blockHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("L1Block.UpdateL1GasOracleStatusAndOracleTxHash error: %w, block hash: %v, status: %v, tx hash: %v", err, blockHash, status.String(), txHash)
}
return nil
}
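
A short usage sketch with a hypothetical importL1Blocks helper: resume from the highest block already stored, then append only blocks above it.

package orm

import (
    "context"

    "gorm.io/gorm"
)

// importL1Blocks is a hypothetical sketch showing how the read and write methods compose.
func importL1Blocks(ctx context.Context, db *gorm.DB, fetched []L1Block) error {
    blockOrm := NewL1Block(db)

    latest, err := blockOrm.GetLatestL1BlockHeight(ctx)
    if err != nil {
        return err
    }

    // Keep only blocks above the stored height; InsertL1Blocks is a no-op for an empty slice.
    var newBlocks []L1Block
    for _, block := range fetched {
        if block.Number > latest {
            newBlocks = append(newBlocks, block)
        }
    }
    return blockOrm.InsertL1Blocks(ctx, newBlocks)
}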

View File

@@ -1,140 +0,0 @@
package orm
import (
"context"
"database/sql"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L1Message is the structure of a stored layer1 bridge message
type L1Message struct {
db *gorm.DB `gorm:"column:-"`
QueueIndex uint64 `json:"queue_index" gorm:"column:queue_index"`
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
Height uint64 `json:"height" gorm:"column:height"`
GasLimit uint64 `json:"gas_limit" gorm:"column:gas_limit"`
Sender string `json:"sender" gorm:"column:sender"`
Target string `json:"target" gorm:"column:target"`
Value string `json:"value" gorm:"column:value"`
Calldata string `json:"calldata" gorm:"column:calldata"`
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"`
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"`
Status int `json:"status" gorm:"column:status;default:1"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL1Message creates an L1Message ORM instance
func NewL1Message(db *gorm.DB) *L1Message {
return &L1Message{db: db}
}
// TableName returns the L1Message table name
func (*L1Message) TableName() string {
return "l1_message"
}
// GetLayer1LatestWatchedHeight returns the latest height stored in the table
func (m *L1Message) GetLayer1LatestWatchedHeight() (int64, error) {
// Note: this is not exact, since some blocks may not contain any messages.
// But it is only called at startup, so some redundancy is acceptable.
var maxHeight sql.NullInt64
result := m.db.Model(&L1Message{}).Select("MAX(height)").Scan(&maxHeight)
if result.Error != nil {
return -1, result.Error
}
if maxHeight.Valid {
return maxHeight.Int64, nil
}
return -1, nil
}
// GetLayer1LatestMessageWithLayer2Hash returns the latest L1 message that has a layer2 hash
func (m *L1Message) GetLayer1LatestMessageWithLayer2Hash() (*L1Message, error) {
var msg *L1Message
err := m.db.Where("layer2_hash IS NOT NULL").Order("queue_index DESC").First(&msg).Error
if err != nil {
return nil, err
}
return msg, nil
}
// GetL1MessagesByStatus fetches a list of messages with the given status, up to the given limit
func (m *L1Message) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]L1Message, error) {
var msgs []L1Message
err := m.db.Where("status", int(status)).Order("queue_index ASC").Limit(int(limit)).Find(&msgs).Error
if err != nil {
return nil, err
}
return msgs, nil
}
// GetL1MessageByQueueIndex fetches a message by queue_index
// for unit test
func (m *L1Message) GetL1MessageByQueueIndex(queueIndex uint64) (*L1Message, error) {
var msg L1Message
err := m.db.Where("queue_index", queueIndex).First(&msg).Error
if err != nil {
return nil, err
}
return &msg, nil
}
// GetL1MessageByMsgHash fetches a message by msg_hash
// for unit test
func (m *L1Message) GetL1MessageByMsgHash(msgHash string) (*L1Message, error) {
var msg L1Message
err := m.db.Where("msg_hash", msgHash).First(&msg).Error
if err != nil {
return nil, err
}
return &msg, nil
}
// SaveL1Messages batch saves a list of layer1 messages
func (m *L1Message) SaveL1Messages(ctx context.Context, messages []*L1Message) error {
if len(messages) == 0 {
return nil
}
err := m.db.WithContext(ctx).Create(&messages).Error
if err != nil {
queueIndices := make([]uint64, 0, len(messages))
heights := make([]uint64, 0, len(messages))
for _, msg := range messages {
queueIndices = append(queueIndices, msg.QueueIndex)
heights = append(heights, msg.Height)
}
log.Error("failed to insert l1Messages", "queueIndices", queueIndices, "heights", heights, "err", err)
}
return err
}
// UpdateLayer1Status updates the message status, given the message hash
func (m *L1Message) UpdateLayer1Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
if err := m.db.Model(&L1Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", int(status)).Error; err != nil {
return err
}
return nil
}
// UpdateLayer1StatusAndLayer2Hash updates message status and layer2 transaction hash, given message hash
func (m *L1Message) UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer2Hash string) error {
updateFields := map[string]interface{}{
"status": int(status),
"layer2_hash": layer2Hash,
}
if err := m.db.Model(&L1Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updateFields).Error; err != nil {
return err
}
return nil
}
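
A short sketch mirroring how the L2 watcher above uses this ORM; the recordRelayResult helper is hypothetical and not part of this PR.

package orm

import (
    "context"

    "gorm.io/gorm"

    "scroll-tech/common/types"
)

// recordRelayResult is a hypothetical sketch: persist newly watched L1 messages and,
// once a relay transaction is observed on L2, record its outcome and tx hash.
func recordRelayResult(ctx context.Context, db *gorm.DB, msgs []*L1Message, msgHash, l2TxHash string, succeeded bool) error {
    msgOrm := NewL1Message(db)

    if err := msgOrm.SaveL1Messages(ctx, msgs); err != nil {
        return err
    }

    status := types.MsgConfirmed
    if !succeeded {
        status = types.MsgFailed
    }
    return msgOrm.UpdateLayer1StatusAndLayer2Hash(ctx, msgHash, status, l2TxHash)
}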

View File

@@ -1,233 +0,0 @@
package orm
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L2Block represents an L2 block in the database.
type L2Block struct {
db *gorm.DB `gorm:"column:-"`
// block
Number uint64 `json:"number" gorm:"number"`
Hash string `json:"hash" gorm:"hash"`
ParentHash string `json:"parent_hash" gorm:"parent_hash"`
Header string `json:"header" gorm:"header"`
Transactions string `json:"transactions" gorm:"transactions"`
WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
TxNum uint32 `json:"tx_num" gorm:"tx_num"`
GasUsed uint64 `json:"gas_used" gorm:"gas_used"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
// chunk
ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL2Block creates a new L2Block instance
func NewL2Block(db *gorm.DB) *L2Block {
return &L2Block{db: db}
}
// TableName returns the name of the "l2_block" table.
func (*L2Block) TableName() string {
return "l2_block"
}
// GetL2BlocksLatestHeight retrieves the height of the latest L2 block.
// If the l2_block table is empty, it returns 0 to represent the genesis block height.
func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("COALESCE(MAX(number), 0)")
var maxNumber uint64
if err := db.Row().Scan(&maxNumber); err != nil {
return 0, fmt.Errorf("L2Block.GetL2BlocksLatestHeight error: %w", err)
}
return maxNumber, nil
}
// GetUnchunkedBlocks gets the L2 blocks that have not yet been put into a chunk.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("chunk_hash IS NULL")
db = db.Order("number ASC")
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
}
var wrappedBlocks []*types.WrappedBlock
for _, v := range l2Blocks {
var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
}
wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
}
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}
return wrappedBlocks, nil
}
// GetL2Blocks retrieves selected L2Blocks from the database.
// The returned L2Blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
for key, value := range fields {
db = db.Where(key, value)
}
for _, orderBy := range orderByList {
db = db.Order(orderBy)
}
if limit > 0 {
db = db.Limit(limit)
}
db = db.Order("number ASC")
var l2Blocks []*L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2Blocks error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
}
return l2Blocks, nil
}
// GetL2BlocksInRange retrieves the L2 blocks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end block numbers.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) {
if startBlockNumber > endBlockNumber {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber)
}
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_trie_root")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC")
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
// sanity check
if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks))
}
var wrappedBlocks []*types.WrappedBlock
for _, v := range l2Blocks {
var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
}
return wrappedBlocks, nil
}
// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlock) error {
var l2Blocks []L2Block
for _, block := range blocks {
header, err := json.Marshal(block.Header)
if err != nil {
log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
txs, err := json.Marshal(block.Transactions)
if err != nil {
log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
l2Block := L2Block{
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
ParentHash: block.Header.ParentHash.String(),
Transactions: string(txs),
WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
Header: string(header),
}
l2Blocks = append(l2Blocks, l2Block)
}
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
if err := db.Create(&l2Blocks).Error; err != nil {
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
return nil
}
// UpdateChunkHashInRange updates the chunk_hash of the blocks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
// This function ensures that the number of rows updated equals (endIndex - startIndex + 1).
// If the rows affected do not match this expectation, an error is returned.
func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, chunkHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Where("number >= ? AND number <= ?", startIndex, endIndex)
tx := db.Update("chunk_hash", chunkHash)
if tx.Error != nil {
return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start index: %v, end index: %v, chunk hash: %v", tx.Error, startIndex, endIndex, chunkHash)
}
// sanity check
if uint64(tx.RowsAffected) != endIndex-startIndex+1 {
return fmt.Errorf("L2Block.UpdateChunkHashInRange: incorrect number of rows affected, expected: %v, got: %v", endIndex-startIndex+1, tx.RowsAffected)
}
return nil
}
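The optional dbTX argument above lets the chunk-hash update share a transaction with other writes. A minimal sketch of that pattern, assuming the same orm package as above; the markChunked helper is hypothetical.

package example

import (
	"context"

	"gorm.io/gorm"

	"scroll-tech/bridge/internal/orm"
)

// markChunked updates chunk_hash for blocks [start, end] inside a single gorm
// transaction, so the rows-affected sanity check and any sibling writes commit
// or roll back together.
func markChunked(ctx context.Context, db *gorm.DB, l2BlockOrm *orm.L2Block, start, end uint64, chunkHash string) error {
	return db.Transaction(func(tx *gorm.DB) error {
		return l2BlockOrm.UpdateChunkHashInRange(ctx, start, end, chunkHash, tx)
	})
}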

View File

@@ -1,240 +0,0 @@
package orm
import (
"context"
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
)
var (
base *docker.App
db *gorm.DB
l2BlockOrm *L2Block
chunkOrm *Chunk
batchOrm *Batch
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
chunk1 *types.Chunk
chunk2 *types.Chunk
chunkHash1 common.Hash
chunkHash2 common.Hash
)
func TestMain(m *testing.M) {
t := &testing.T{}
setupEnv(t)
defer tearDownEnv(t)
m.Run()
}
func setupEnv(t *testing.T) {
base = docker.NewDockerApp()
base.RunDBImage(t)
var err error
db, err = database.InitDB(
&database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
batchOrm = NewBatch(db)
chunkOrm = NewChunk(db)
l2BlockOrm = NewL2Block(db)
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
assert.NoError(t, err)
templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
assert.NoError(t, err)
chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
chunkHash1, err = chunk1.Hash(0)
assert.NoError(t, err)
chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
assert.NoError(t, err)
}
func tearDownEnv(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
sqlDB.Close()
base.Free()
}
func TestL2BlockOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(3), height)
blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, wrappedBlock1, blocks[0])
assert.Equal(t, wrappedBlock2, blocks[1])
blocks, err = l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, wrappedBlock1, blocks[0])
assert.Equal(t, wrappedBlock2, blocks[1])
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 2, 2, "test hash")
assert.NoError(t, err)
blocks, err = l2BlockOrm.GetUnchunkedBlocks(context.Background())
assert.NoError(t, err)
assert.Len(t, blocks, 1)
assert.Equal(t, wrappedBlock2, blocks[0])
}
func TestChunkOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
assert.NoError(t, err)
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned)
assert.NoError(t, err)
chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus))
assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus))
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
assert.NoError(t, err)
chunks, err = chunkOrm.GetUnbatchedChunks(context.Background())
assert.NoError(t, err)
assert.Len(t, chunks, 1)
}
func TestBatchOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
assert.NoError(t, err)
hash1 := batch1.Hash
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err)
batchHeader1, err := types.DecodeBatchHeader(batch1.BatchHeader)
assert.NoError(t, err)
batchHash1 := batchHeader1.Hash().Hex()
assert.Equal(t, hash1, batchHash1)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
assert.NoError(t, err)
hash2 := batch2.Hash
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
assert.NoError(t, err)
batchHeader2, err := types.DecodeBatchHeader(batch2.BatchHeader)
assert.NoError(t, err)
batchHash2 := batchHeader2.Hash().Hex()
assert.Equal(t, hash2, batchHash2)
count, err := batchOrm.GetBatchCount(context.Background())
assert.NoError(t, err)
assert.Equal(t, uint64(2), count)
pendingBatches, err := batchOrm.GetPendingBatches(context.Background(), 100)
assert.NoError(t, err)
assert.Equal(t, 2, len(pendingBatches))
rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
assert.NoError(t, err)
assert.Equal(t, 2, len(rollupStatus))
assert.Equal(t, types.RollupPending, rollupStatus[0])
assert.Equal(t, types.RollupPending, rollupStatus[1])
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err)
assert.Nil(t, dbProof)
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
assert.NoError(t, err)
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
assert.NoError(t, err)
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
assert.NoError(t, err)
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
}

View File

@@ -1,10 +1,6 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
import {BatchHeaderV0Codec} from "../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
import {ChunkCodec} from "../../contracts/src/libraries/codec/ChunkCodec.sol";
import {IL1MessageQueue} from "../../contracts/src/L1/rollup/IL1MessageQueue.sol";
contract MockBridgeL1 {
/******************************
* Events from L1MessageQueue *
@@ -21,7 +17,7 @@ contract MockBridgeL1 {
address indexed sender,
address indexed target,
uint256 value,
uint64 queueIndex,
uint256 queueIndex,
uint256 gasLimit,
bytes data
);
@@ -50,27 +46,74 @@ contract MockBridgeL1 {
/// @param messageHash The hash of the message.
event RelayedMessage(bytes32 indexed messageHash);
/// @dev The maximum number of transactions in one batch.
uint256 public immutable maxNumTxInBatch;
/// @dev The hash used for padding public inputs.
bytes32 public immutable paddingTxHash;
/***************************
* Events from ScrollChain *
***************************/
/// @notice Emitted when a new batch is committed.
/// @param batchHash The hash of the batch.
/// @notice Emitted when a new batch is commited.
/// @param batchHash The hash of the batch
event CommitBatch(bytes32 indexed batchHash);
/// @notice Emitted when a batch is reverted.
/// @param batchHash The identification of the batch.
event RevertBatch(bytes32 indexed batchHash);
/// @notice Emitted when a batch is finalized.
/// @param batchHash The hash of the batch
/// @param stateRoot The state root in layer 2 after this batch.
/// @param withdrawRoot The merkle root in layer2 after this batch.
event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
event FinalizeBatch(bytes32 indexed batchHash);
/***********
* Structs *
***********/
struct BlockContext {
// The hash of this block.
bytes32 blockHash;
// The parent hash of this block.
bytes32 parentHash;
// The height of this block.
uint64 blockNumber;
// The timestamp of this block.
uint64 timestamp;
// The base fee of this block.
// Currently, it is not used, because we disable EIP-1559.
// We keep it for future-proofing.
uint256 baseFee;
// The gas limit of this block.
uint64 gasLimit;
// The number of transactions in this block, both L1 & L2 txs.
uint16 numTransactions;
// The number of l1 messages in this block.
uint16 numL1Messages;
}
struct Batch {
// The list of blocks in this batch
BlockContext[] blocks; // MAX_NUM_BLOCKS = 100, about 5 min
// The state root of previous batch.
// The first batch will use 0x0 for prevStateRoot
bytes32 prevStateRoot;
// The state root of the last block in this batch.
bytes32 newStateRoot;
// The withdraw trie root of the last block in this batch.
bytes32 withdrawTrieRoot;
// The index of the batch.
uint64 batchIndex;
// The parent batch hash.
bytes32 parentBatchHash;
// Concatenated raw data of RLP encoded L2 txs
bytes l2Transactions;
}
struct L2MessageProof {
// The index of the batch where the message belongs to.
uint256 batchIndex;
// The hash of the batch where the message belongs to.
bytes32 batchHash;
// Concatenation of merkle proof for withdraw merkle trie.
bytes merkleProof;
}
@@ -82,7 +125,14 @@ contract MockBridgeL1 {
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
mapping(uint256 => bytes32) public committedBatches;
/***************
* Constructor *
***************/
constructor() {
maxNumTxInBatch = 44;
paddingTxHash = 0x0000000000000000000000000000000000000000000000000000000000000000;
}
/***********************************
* Functions from L2GasPriceOracle *
@@ -104,7 +154,7 @@ contract MockBridgeL1 {
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, target, value, messageNonce, message);
{
address _sender = applyL1ToL2Alias(address(this));
emit QueueTransaction(_sender, target, 0, uint64(messageNonce), gasLimit, _xDomainCalldata);
emit QueueTransaction(_sender, target, 0, messageNonce, gasLimit, _xDomainCalldata);
}
emit SentMessage(msg.sender, target, value, messageNonce, gasLimit, message);
@@ -128,65 +178,37 @@ contract MockBridgeL1 {
* Functions from ScrollChain *
******************************/
function commitBatch(
uint8 /*version*/,
bytes calldata /*parentBatchHeader*/,
bytes[] memory chunks,
bytes calldata /*skippedL1MessageBitmap*/
) external {
// check whether the batch is empty
uint256 _chunksLength = chunks.length;
require(_chunksLength > 0, "batch is empty");
function commitBatch(Batch memory _batch) external {
_commitBatch(_batch);
}
uint256 dataPtr;
assembly {
dataPtr := mload(0x40)
mstore(0x40, add(dataPtr, mul(_chunksLength, 32)))
function commitBatches(Batch[] memory _batches) external {
for (uint256 i = 0; i < _batches.length; i++) {
_commitBatch(_batches[i]);
}
}
for (uint256 i = 0; i < _chunksLength; i++) {
_commitChunk(dataPtr, chunks[i]);
unchecked {
dataPtr += 32;
}
}
bytes32 _dataHash;
assembly {
let dataLen := mul(_chunksLength, 0x20)
_dataHash := keccak256(sub(dataPtr, dataLen), dataLen)
}
bytes memory paddedData = new bytes(89);
assembly {
mstore(add(paddedData, 57), _dataHash)
}
uint256 batchPtr;
assembly {
batchPtr := add(paddedData, 32)
}
bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89);
committedBatches[0] = _batchHash;
emit CommitBatch(_batchHash);
function revertBatch(bytes32 _batchHash) external {
emit RevertBatch(_batchHash);
}
function finalizeBatchWithProof(
bytes calldata /*batchHeader*/,
bytes32 /*prevStateRoot*/,
bytes32 postStateRoot,
bytes32 withdrawRoot,
bytes calldata /*aggrProof*/
bytes32 _batchHash,
uint256[] memory,
uint256[] memory
) external {
bytes32 _batchHash = committedBatches[0];
emit FinalizeBatch(_batchHash, postStateRoot, withdrawRoot);
emit FinalizeBatch(_batchHash);
}
/**********************
* Internal Functions *
**********************/
function _commitBatch(Batch memory _batch) internal {
bytes32 _batchHash = _computePublicInputHash(_batch);
emit CommitBatch(_batchHash);
}
/// @dev Internal function to generate the correct cross domain calldata for a message.
/// @param _sender Message sender address.
/// @param _target Target contract address.
@@ -212,10 +234,6 @@ contract MockBridgeL1 {
);
}
/// @notice Utility function that converts the address in the L1 that submitted a tx to
/// the inbox to the msg.sender viewed in the L2
/// @param l1Address the address in the L1 that triggered the tx to L2
/// @return l2Address L2 address as viewed in msg.sender
function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) {
uint160 offset = uint160(0x1111000000000000000000000000000000001111);
unchecked {
@@ -223,67 +241,140 @@ contract MockBridgeL1 {
}
}
function _commitChunk(
uint256 memPtr,
bytes memory _chunk
) internal pure {
uint256 chunkPtr;
uint256 startDataPtr;
uint256 dataPtr;
uint256 blockPtr;
assembly {
dataPtr := mload(0x40)
startDataPtr := dataPtr
chunkPtr := add(_chunk, 0x20) // skip chunkLength
blockPtr := add(chunkPtr, 1) // skip numBlocks
}
uint256 _numBlocks = ChunkCodec.validateChunkLength(chunkPtr, _chunk.length);
// concatenate block contexts
uint256 _totalTransactionsInChunk;
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodec.copyBlockContext(chunkPtr, dataPtr, i);
uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH;
/// @dev Internal function to compute the public input hash.
/// @param batch The batch to compute.
function _computePublicInputHash(Batch memory batch)
internal
view
returns (
bytes32
)
{
uint256 publicInputsPtr;
// 1. append prevStateRoot, newStateRoot and withdrawTrieRoot to public inputs
{
bytes32 prevStateRoot = batch.prevStateRoot;
bytes32 newStateRoot = batch.newStateRoot;
bytes32 withdrawTrieRoot = batch.withdrawTrieRoot;
// number of bytes in public inputs: 32 * 3 + 124 * blocks + 32 * MAX_NUM_TXS
uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch;
assembly {
publicInputsPtr := mload(0x40)
mstore(0x40, add(publicInputsPtr, publicInputsSize))
mstore(publicInputsPtr, prevStateRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, newStateRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, withdrawTrieRoot)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
assembly {
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
blockPtr := add(chunkPtr, 1) // reset block ptr
}
uint64 numTransactionsInBatch;
BlockContext memory _block;
// 2. append block information to public inputs.
for (uint256 i = 0; i < batch.blocks.length; i++) {
// validate blocks, we won't check first block against previous batch.
{
BlockContext memory _currentBlock = batch.blocks[i];
if (i > 0) {
require(_block.blockHash == _currentBlock.parentHash, "Parent hash mismatch");
require(_block.blockNumber + 1 == _currentBlock.blockNumber, "Block number mismatch");
}
_block = _currentBlock;
}
// concatenate tx hashes
uint256 l2TxPtr = ChunkCodec.l2TxPtr(chunkPtr, _numBlocks);
while (_numBlocks > 0) {
// concatenate l2 transaction hashes
uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
for (uint256 j = 0; j < _numTransactionsInBlock; j++) {
bytes32 txHash;
(txHash, l2TxPtr) = ChunkCodec.loadL2TxHash(l2TxPtr);
// append blockHash and parentHash to public inputs
{
bytes32 blockHash = _block.blockHash;
bytes32 parentHash = _block.parentHash;
assembly {
mstore(dataPtr, txHash)
dataPtr := add(dataPtr, 0x20)
mstore(publicInputsPtr, blockHash)
publicInputsPtr := add(publicInputsPtr, 0x20)
mstore(publicInputsPtr, parentHash)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
// append blockNumber and blockTimestamp to public inputs
{
uint256 blockNumber = _block.blockNumber;
uint256 blockTimestamp = _block.timestamp;
assembly {
mstore(publicInputsPtr, shl(192, blockNumber))
publicInputsPtr := add(publicInputsPtr, 0x8)
mstore(publicInputsPtr, shl(192, blockTimestamp))
publicInputsPtr := add(publicInputsPtr, 0x8)
}
}
// append baseFee to public inputs
{
uint256 baseFee = _block.baseFee;
assembly {
mstore(publicInputsPtr, baseFee)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
uint64 numTransactionsInBlock = _block.numTransactions;
// append gasLimit, numTransactions and numL1Messages to public inputs
{
uint256 gasLimit = _block.gasLimit;
uint256 numL1MessagesInBlock = _block.numL1Messages;
assembly {
mstore(publicInputsPtr, shl(192, gasLimit))
publicInputsPtr := add(publicInputsPtr, 0x8)
mstore(publicInputsPtr, shl(240, numTransactionsInBlock))
publicInputsPtr := add(publicInputsPtr, 0x2)
mstore(publicInputsPtr, shl(240, numL1MessagesInBlock))
publicInputsPtr := add(publicInputsPtr, 0x2)
}
}
numTransactionsInBatch += numTransactionsInBlock;
}
require(numTransactionsInBatch <= maxNumTxInBatch, "Too many transactions in batch");
unchecked {
_numBlocks -= 1;
blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH;
// 3. append transaction hash to public inputs.
uint256 _l2TxnPtr;
{
bytes memory l2Transactions = batch.l2Transactions;
assembly {
_l2TxnPtr := add(l2Transactions, 0x20)
}
}
for (uint256 i = 0; i < batch.blocks.length; i++) {
uint256 numL1MessagesInBlock = batch.blocks[i].numL1Messages;
require(numL1MessagesInBlock == 0);
uint256 numTransactionsInBlock = batch.blocks[i].numTransactions;
for (uint256 j = numL1MessagesInBlock; j < numTransactionsInBlock; ++j) {
bytes32 hash;
assembly {
let txPayloadLength := shr(224, mload(_l2TxnPtr))
_l2TxnPtr := add(_l2TxnPtr, 4)
_l2TxnPtr := add(_l2TxnPtr, txPayloadLength)
hash := keccak256(sub(_l2TxnPtr, txPayloadLength), txPayloadLength)
mstore(publicInputsPtr, hash)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
}
// check chunk has correct length
require(l2TxPtr - chunkPtr == _chunk.length, "incomplete l2 transaction data");
// compute data hash and store to memory
assembly {
let dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
mstore(memPtr, dataHash)
// 4. append padding transaction to public inputs.
bytes32 txHashPadding = paddingTxHash;
for (uint256 i = numTransactionsInBatch; i < maxNumTxInBatch; i++) {
assembly {
mstore(publicInputsPtr, txHashPadding)
publicInputsPtr := add(publicInputsPtr, 0x20)
}
}
// 5. compute public input hash
bytes32 publicInputHash;
{
uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch;
assembly {
publicInputHash := keccak256(sub(publicInputsPtr, publicInputsSize), publicInputsSize)
}
}
return publicInputHash;
}
}
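For reference, a small Go sketch (not part of the diff) of the public-input sizing used in _computePublicInputHash above: three 32-byte roots, 124 bytes per block context, and one 32-byte tx-hash slot per allowed transaction. The function name is illustrative.

package example

// publicInputsSize mirrors the Solidity arithmetic above. With the constructor
// value maxNumTxInBatch = 44 and, say, 100 blocks, the buffer is
// 32*3 + 124*100 + 32*44 = 13904 bytes.
func publicInputsSize(numBlocks, maxNumTxInBatch uint64) uint64 {
	return 32*3 + 124*numBlocks + 32*maxNumTxInBatch
}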

View File

@@ -11,6 +11,24 @@ contract MockBridgeL2 {
/// @param messageHash The hash of the corresponding message.
event AppendMessage(uint256 index, bytes32 messageHash);
/********************************
* Events from L1BlockContainer *
********************************/
/// @notice Emitted when a block is imported.
/// @param blockHash The hash of the imported block.
/// @param blockHeight The height of the imported block.
/// @param blockTimestamp The timestamp of the imported block.
/// @param baseFee The base fee of the imported block.
/// @param stateRoot The state root of the imported block.
event ImportBlock(
bytes32 indexed blockHash,
uint256 blockHeight,
uint256 blockTimestamp,
uint256 baseFee,
bytes32 stateRoot
);
/*********************************
* Events from L2ScrollMessenger *
*********************************/

View File

@@ -6,25 +6,27 @@ import (
"math/big"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/database"
"scroll-tech/common/metrics"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
)
var (
bridgeL1MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
)
// Layer1Relayer is responsible for
@@ -36,6 +38,7 @@ var (
type Layer1Relayer struct {
ctx context.Context
db database.OrmFactory
cfg *config.RelayerConfig
// channel used to communicate with transaction sender
@@ -50,13 +53,10 @@ type Layer1Relayer struct {
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
l1MessageOrm *orm.L1Message
l1BlockOrm *orm.L1Block
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
func NewLayer1Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
@@ -88,15 +88,14 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
}
l1Relayer := &Layer1Relayer{
ctx: ctx,
l1MessageOrm: orm.NewL1Message(db),
l1BlockOrm: orm.NewL1Block(db),
ctx: ctx,
db: db,
messageSender: messageSender,
l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
l2MessengerABI: bridge_abi.L2ScrollMessengerABI,
gasOracleSender: gasOracleSender,
l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,
l1GasOracleABI: bridge_abi.L1GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
@@ -113,7 +112,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
// ProcessSavedEvents relays saved unprocessed cross-domain transactions to the desired blockchain
func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order
msgs, err := r.l1MessageOrm.GetL1MessagesByStatus(types.MsgPending, 100)
msgs, err := r.db.GetL1MessagesByStatus(types.MsgPending, 100)
if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return
@@ -124,8 +123,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
}
for _, msg := range msgs {
tmpMsg := msg
if err = r.processSavedEvent(&tmpMsg); err != nil {
if err = r.processSavedEvent(msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
@@ -134,15 +132,15 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
}
}
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
func (r *Layer1Relayer) processSavedEvent(msg *types.L1Message) error {
calldata := common.Hex2Bytes(msg.Calldata)
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay)
if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
return err
@@ -150,7 +148,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
bridgeL1MsgsRelayedTotalCounter.Inc(1)
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
}
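The errors.Is checks above rely on sentinel errors for the two known revert reasons; their definitions are not shown in this diff, so the following sketch of how such sentinels might be declared and matched is an assumption.

package example

import (
	"errors"
	"strings"
)

// Hypothetical sentinels and classifier; the real definitions in the bridge
// package may differ.
var (
	ErrExecutionRevertedMessageExpired         = errors.New("execution reverted: Message expired")
	ErrExecutionRevertedAlreadySuccessExecuted = errors.New("execution reverted: Message was already successfully executed")
)

// classifyRevert maps a raw RPC error onto a sentinel so callers can use errors.Is.
func classifyRevert(err error) error {
	switch {
	case err == nil:
		return nil
	case strings.Contains(err.Error(), "Message expired"):
		return ErrExecutionRevertedMessageExpired
	case strings.Contains(err.Error(), "Message was already successfully executed"):
		return ErrExecutionRevertedAlreadySuccessExecuted
	default:
		return err
	}
}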
@@ -159,17 +157,17 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
latestBlockHeight, err := r.db.GetLatestL1BlockHeight()
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
return
}
blocks, err := r.l1BlockOrm.GetL1Blocks(r.ctx, map[string]interface{}{
blocks, err := r.db.GetL1BlockInfos(map[string]interface{}{
"number": latestBlockHeight,
})
if err != nil {
log.Error("Failed to GetL1Blocks from db", "height", latestBlockHeight, "err", err)
log.Error("Failed to GetL1BlockInfos from db", "height", latestBlockHeight, "err", err)
return
}
if len(blocks) != 1 {
@@ -178,7 +176,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
}
block := blocks[0]
if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
if block.GasOracleStatus == types.GasOraclePending {
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// lastGasPrice is undefined, or block.BaseFee >= minGasPrice and the price moved by more than the configured diff
if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) {
@@ -197,7 +195,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
err = r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
@@ -216,14 +214,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.messageSender.ConfirmChan():
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
@@ -232,14 +230,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
err := r.db.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
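The gas-oracle update rule in ProcessGasPriceOracle above can be summarized in a short sketch: push a new price only on the first sample, or when the base fee is above the configured minimum and has drifted by more than gasPriceDiff. This is not part of the diff, and the precision constant shown is an assumption.

package example

// gasPriceDiffPrecision is assumed here; the real constant is defined elsewhere
// in the relayer package.
const gasPriceDiffPrecision = 1000000

func shouldUpdateGasPrice(lastGasPrice, baseFee, minGasPrice, gasPriceDiff uint64) bool {
	if lastGasPrice == 0 {
		return true // no previous sample
	}
	expectedDelta := lastGasPrice * gasPriceDiff / gasPriceDiffPrecision
	return baseFee >= minGasPrice &&
		(baseFee >= lastGasPrice+expectedDelta || baseFee <= lastGasPrice-expectedDelta)
}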

View File

@@ -0,0 +1,155 @@
package relayer
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/bridge/sender"
"scroll-tech/database"
)
var (
templateL1Message = []*types.L1Message{
{
QueueIndex: 1,
MsgHash: "msg_hash1",
Height: 1,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
GasLimit: 11529940,
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer1Hash: "hash0",
},
{
QueueIndex: 2,
MsgHash: "msg_hash2",
Height: 2,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "0x19ece",
GasLimit: 11529940,
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer1Hash: "hash1",
},
}
)
// testCreateNewL1Relayer tests creating a new L1 relayer instance and stopping it
func testCreateNewL1Relayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
func testL1RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
assert.NoError(t, db.SaveL1Messages(context.Background(), templateL1Message))
relayer.ProcessSavedEvents()
msg1, err := db.GetL1MessageByQueueIndex(1)
assert.NoError(t, err)
assert.Equal(t, msg1.Status, types.MsgSubmitted)
msg2, err := db.GetL1MessageByQueueIndex(2)
assert.NoError(t, err)
assert.Equal(t, msg2.Status, types.MsgSubmitted)
}
func testL1RelayerMsgConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.SaveL1Messages(context.Background(),
[]*types.L1Message{
{MsgHash: "msg-1", QueueIndex: 0},
{MsgHash: "msg-2", QueueIndex: 1},
}))
// Create and set up the Layer1 Relayer.
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-1",
IsSuccessful: true,
})
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-2",
IsSuccessful: false,
})
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL1MessageByMsgHash("msg-1")
msg2, err2 := db.GetL1MessageByMsgHash("msg-2")
return err1 == nil && msg1.Status == types.MsgConfirmed &&
err2 == nil && msg2.Status == types.MsgRelayFailed
})
}
func testL1RelayerGasOracleConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.InsertL1Blocks(context.Background(),
[]*types.L1BlockInfo{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
}))
// Create and set up the Layer1 Relayer.
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: "gas-oracle-1",
IsSuccessful: true,
})
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: "gas-oracle-2",
IsSuccessful: false,
})
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := db.GetL1BlockInfos(map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && msg1[0].GasOracleStatus == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && msg2[0].GasOracleStatus == types.GasOracleFailed
})
}

View File

@@ -0,0 +1,593 @@
package relayer
import (
"context"
"errors"
"fmt"
"math/big"
"runtime"
"sync"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
"scroll-tech/bridge/utils"
)
var (
bridgeL2MsgsRelayedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesSkippedTotalCounter = geth_metrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
//
// Actions are triggered by a new head from the layer 1 geth node.
// @todo It would be better to trigger this from the watcher.
type Layer2Relayer struct {
ctx context.Context
l2Client *ethclient.Client
db database.OrmFactory
cfg *config.RelayerConfig
messageSender *sender.Sender
l1MessengerABI *abi.ABI
rollupSender *sender.Sender
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
l2GasOracleABI *abi.ABI
minGasLimitForMessageRelay uint64
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// A list of processing batches commitment.
// key(string): confirmation ID, value([]string): batch hashes.
processingBatchesCommitment sync.Map
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
log.Error("Failed to create messenger sender", "err", err)
return nil, err
}
rollupSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.RollupSenderPrivateKeys)
if err != nil {
log.Error("Failed to create rollup sender", "err", err)
return nil, err
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
if err != nil {
log.Error("Failed to create gas oracle sender", "err", err)
return nil, err
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
minGasPrice = cfg.GasOracleConfig.MinGasPrice
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
} else {
minGasPrice = 0
gasPriceDiff = defaultGasPriceDiff
}
minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
if cfg.MessageRelayMinGasLimit != 0 {
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
l2Client: l2Client,
messageSender: messageSender,
l1MessengerABI: bridge_abi.L1ScrollMessengerABI,
rollupSender: rollupSender,
l1RollupABI: bridge_abi.ScrollChainABI,
gasOracleSender: gasOracleSender,
l2GasOracleABI: bridge_abi.L2GasPriceOracleABI,
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
processingMessage: sync.Map{},
processingBatchesCommitment: sync.Map{},
processingFinalization: sync.Map{},
}
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved unprocessed cross-domain transactions to the desired blockchain
func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": types.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process l2 saved event", "err", err)
}
return
}
}
}
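A sketch of the batching pattern used above: items are processed in concurrent groups whose size is bounded by both GOMAXPROCS and the number of sender accounts. The processInBatches helper is hypothetical; errgroup and mathutil are the packages imported above.

package example

import (
	"runtime"

	"golang.org/x/sync/errgroup"
	"modernc.org/mathutil"
)

// processInBatches runs process over items in bounded concurrent batches,
// mirroring the loop in ProcessSavedEvents above.
func processInBatches(items []string, numAccounts int, process func(string) error) error {
	batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, numAccounts)
	if batchSize < 1 {
		batchSize = 1 // guard against a zero account count
	}
	for size := 0; len(items) > 0; items = items[size:] {
		if size = len(items); size > batchSize {
			size = batchSize
		}
		var g errgroup.Group
		for _, item := range items[:size] {
			item := item // capture loop variable
			g.Go(func() error { return process(item) })
		}
		if err := g.Wait(); err != nil {
			return err
		}
	}
	return nil
}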
func (r *Layer2Relayer) processSavedEvent(msg *types.L2Message) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// Get the block info that contains the message
blockInfos, err := r.db.GetL2BlockInfos(map[string]interface{}{"number": msg.Height})
if err != nil {
log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height)
}
blockInfo := blockInfos[0]
if !blockInfo.BatchHash.Valid {
log.Error("Block has not been batched yet", "number", blockInfo.Number, "msg.nonce", msg.Nonce)
return nil
}
// TODO: rebuild the withdraw trie to generate the merkle proof
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BatchHash: common.HexToHash(blockInfo.BatchHash.String),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
// @todo maybe panic?
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// TODO: need to skip this message by changing its status to MsgError
}
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, msgNonce, calldata, proof)
if err != nil {
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return err
}
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, r.minGasLimitForMessageRelay)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message was already successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
}
return err
}
bridgeL2MsgsRelayedTotalCounter.Inc(1)
log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())
// save status in db
// @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
}
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
batch, err := r.db.GetLatestBatch()
if err != nil {
log.Error("Failed to GetLatestBatch", "err", err)
return
}
if batch.OracleStatus == types.GasOraclePending {
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
if err != nil {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
return
}
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
// lastGasPrice is undefined, or suggestGasPriceUint64 >= minGasPrice and the price moved by more than the configured diff
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
return
}
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
}
err = r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
}
r.lastGasPrice = suggestGasPriceUint64
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
}
// SendCommitTx sends commitBatches tx to L1.
func (r *Layer2Relayer) SendCommitTx(batchData []*types.BatchData) error {
if len(batchData) == 0 {
log.Error("SendCommitTx receives empty batch")
return nil
}
// pack calldata
commitBatches := make([]bridge_abi.IScrollChainBatch, len(batchData))
for i, batch := range batchData {
commitBatches[i] = batch.Batch
}
calldata, err := r.l1RollupABI.Pack("commitBatches", commitBatches)
if err != nil {
log.Error("Failed to pack commitBatches",
"error", err,
"start_batch_index", commitBatches[0].BatchIndex,
"end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex)
return err
}
// generate a unique txID and send transaction
var bytes []byte
for _, batch := range batchData {
bytes = append(bytes, batch.Hash().Bytes()...)
}
txID := crypto.Keccak256Hash(bytes).String()
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatches tx to layer1 ", "err", err)
}
return err
}
bridgeL2BatchesCommittedTotalCounter.Inc(int64(len(commitBatches)))
log.Info("Sent the commitBatches tx to layer1",
"tx_hash", txHash.Hex(),
"start_batch_index", commitBatches[0].BatchIndex,
"end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex)
// record and sync with db, @todo handle db error
batchHashes := make([]string, len(batchData))
for i, batch := range batchData {
batchHashes[i] = batch.Hash().Hex()
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err)
}
}
r.processingBatchesCommitment.Store(txID, batchHashes)
return nil
}
// ProcessCommittedBatches submits the proof to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
bridgeL2BatchesSkippedTotalCounter.Inc(count)
log.Info("Skipping batches", "count", count)
}
// batches are sorted by batch index in increasing order
batchHashes, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
}
if len(batchHashes) == 0 {
return
}
hash := batchHashes[0]
// @todo add support to relay multiple batches
batches, err := r.db.GetBlockBatches(map[string]interface{}{"hash": hash}, "LIMIT 1")
if err != nil {
log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err)
return
}
if len(batches) == 0 {
log.Error("Unexpected result for GetBlockBatches", "hash", hash, "len", 0)
return
}
batch := batches[0]
status := batch.ProvingStatus
switch status {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case types.ProvingTaskProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskFailed, types.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
success := false
previousBatch, err := r.db.GetLatestFinalizingOrFinalizedBatch()
// skip submitting proof
if err == nil && uint64(batch.CreatedAt.Sub(*previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec {
log.Info(
"Not enough time passed, skipping",
"hash", hash,
"createdAt", batch.CreatedAt,
"lastFinalizingHash", previousBatch.Hash,
"lastFinalizingStatus", previousBatch.RollupStatus,
"lastFinalizingCreatedAt", previousBatch.CreatedAt,
)
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
} else {
success = true
}
return
}
// handle unexpected db error
if err != nil && err.Error() != "sql: no rows in result set" {
log.Error("Failed to get latest finalized batch", "err", err)
return
}
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
if err = r.db.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}
}()
proofBuffer, icBuffer, err := r.db.GetVerifiedProofAndInstanceCommitmentsByHash(hash)
if err != nil {
log.Warn("fetch get proof by hash failed", "hash", hash, "err", err)
return
}
if proofBuffer == nil || icBuffer == nil {
log.Warn("proof or instance not ready", "hash", hash)
return
}
if len(proofBuffer)%32 != 0 {
log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
return
}
if len(icBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "hash", hash, "length", len(icBuffer))
return
}
proof := utils.BufferToUint256Le(proofBuffer)
instance := utils.BufferToUint256Le(icBuffer)
data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return
}
txID := hash + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err)
}
return
}
bridgeL2BatchesFinalizedTotalCounter.Inc(1)
log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err)
}
success = true
r.processingFinalization.Store(txID, hash)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
"block_status", status,
)
}
}
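The proof and instance buffers are turned into uint256 words before being packed into finalizeBatchWithProof. The conversion helper utils.BufferToUint256Le is not shown in this diff, so the following little-endian word split is only a guess at its behavior.

package example

import "math/big"

// bufferToUint256Le is a hypothetical stand-in: it splits a 32-byte-aligned
// buffer into words, interpreting each 32-byte word as little-endian.
func bufferToUint256Le(buf []byte) []*big.Int {
	words := make([]*big.Int, 0, len(buf)/32)
	for i := 0; i+32 <= len(buf); i += 32 {
		word := make([]byte, 32)
		for j := 0; j < 32; j++ {
			word[j] = buf[i+31-j] // reverse so SetBytes (big-endian) reads little-endian input
		}
		words = append(words, new(big.Int).SetBytes(word))
	}
	return words
}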
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
transactionType = "MessageRelay"
var status types.MsgStatus
if confirmation.IsSuccessful {
status = types.MsgConfirmed
} else {
status = types.MsgRelayFailed
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
}
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String())
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
}
bridgeL2MsgsRelayedConfirmedTotalCounter.Inc(1)
r.processingMessage.Delete(confirmation.ID)
}
// check whether it is CommitBatches transaction
if batchBatches, ok := r.processingBatchesCommitment.Load(confirmation.ID); ok {
transactionType = "BatchesCommitment"
batchHashes := batchBatches.([]string)
var status types.RollupStatus
if confirmation.IsSuccessful {
status = types.RollupCommitted
} else {
status = types.RollupCommitFailed
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
}
for _, batchHash := range batchHashes {
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err)
}
}
bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(int64(len(batchHashes)))
r.processingBatchesCommitment.Delete(confirmation.ID)
}
// check whether it is proof finalization transaction
if batchHash, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
var status types.RollupStatus
if confirmation.IsSuccessful {
status = types.RollupFinalized
} else {
status = types.RollupFinalizeFailed
log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
}
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err)
}
bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case confirmation := <-r.messageSender.ConfirmChan():
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.db.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}
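Finally, a sketch of the in-flight tracking pattern used by processingMessage, processingBatchesCommitment and processingFinalization above: store the payload under the confirmation ID when a transaction is sent, then resolve and delete it when the confirmation arrives. The tracker type is hypothetical.

package example

import "sync"

// tracker keys in-flight work by confirmation ID, as the relayer does above.
type tracker struct {
	processing sync.Map // confirmation ID -> payload (e.g. a batch hash)
}

func (t *tracker) track(id, payload string) {
	t.processing.Store(id, payload)
}

// confirm returns and removes the payload for id, if any.
func (t *tracker) confirm(id string) (string, bool) {
	v, ok := t.processing.Load(id)
	if !ok {
		return "", false
	}
	t.processing.Delete(id)
	return v.(string), true
}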

View File

@@ -0,0 +1,384 @@
package relayer
import (
"context"
"encoding/json"
"math/big"
"os"
"strconv"
"testing"
"github.com/scroll-tech/go-ethereum/common"
geth_types "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/bridge/sender"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
var (
templateL2Message = []*types.L2Message{
{
Nonce: 1,
Height: 1,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "100",
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer2Hash: "hash0",
},
}
)
func testCreateNewRelayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
func testL2RelayerProcessSaveEvents(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
err = db.SaveL2Messages(context.Background(), templateL2Message)
assert.NoError(t, err)
traces := []*types.WrappedBlock{
{
Header: &geth_types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height)),
},
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
},
{
Header: &geth_types.Header{
Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
},
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
},
}
assert.NoError(t, db.InsertWrappedBlocks(traces))
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: common.Hash{}.Hex(),
StateRoot: common.Hash{}.Hex(),
}
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
batchHash := batchData1.Hash().Hex()
assert.NoError(t, db.SetBatchHashForL2BlocksInDBTx(dbTx, []uint64{1}, batchHash))
assert.NoError(t, dbTx.Commit())
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized)
assert.NoError(t, err)
relayer.ProcessSavedEvents()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err)
assert.Equal(t, types.MsgSubmitted, msg.Status)
}
func testL2RelayerProcessCommittedBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
parentBatch1 := &types.BlockBatch{
Index: 0,
Hash: common.Hash{}.Hex(),
StateRoot: common.Hash{}.Hex(),
}
batchData1 := types.NewBatchData(parentBatch1, []*types.WrappedBlock{wrappedBlock1}, nil)
dbTx, err := db.Beginx()
assert.NoError(t, err)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData1))
batchHash := batchData1.Hash().Hex()
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
status, err := db.GetRollupStatus(batchHash)
assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizing, status)
}
func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchData := genBatchData(t, index)
assert.NoError(t, db.NewBatchInDBTx(dbTx, batchData))
batchHash := batchData.Hash().Hex()
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchHash, provingStatus)
assert.NoError(t, err)
return batchHash
}
skipped := []string{
createBatch(types.RollupCommitted, types.ProvingTaskSkipped, 1),
createBatch(types.RollupCommitted, types.ProvingTaskFailed, 2),
}
notSkipped := []string{
createBatch(types.RollupPending, types.ProvingTaskSkipped, 3),
createBatch(types.RollupCommitting, types.ProvingTaskSkipped, 4),
createBatch(types.RollupFinalizing, types.ProvingTaskSkipped, 5),
createBatch(types.RollupFinalized, types.ProvingTaskSkipped, 6),
createBatch(types.RollupPending, types.ProvingTaskFailed, 7),
createBatch(types.RollupCommitting, types.ProvingTaskFailed, 8),
createBatch(types.RollupFinalizing, types.ProvingTaskFailed, 9),
createBatch(types.RollupFinalized, types.ProvingTaskFailed, 10),
createBatch(types.RollupCommitted, types.ProvingTaskVerified, 11),
}
relayer.ProcessCommittedBatches()
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.Equal(t, types.RollupFinalizationSkipped, status)
}
for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.NotEqual(t, types.RollupFinalizationSkipped, status)
}
}
func testL2RelayerMsgConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
assert.NoError(t, db.SaveL2Messages(context.Background(), []*types.L2Message{
{MsgHash: "msg-1", Nonce: 0}, {MsgHash: "msg-2", Nonce: 1},
}))
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
l2Relayer.processingMessage.Store("msg-1", "msg-1")
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-1",
IsSuccessful: true,
})
l2Relayer.processingMessage.Store("msg-2", "msg-2")
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: "msg-2",
IsSuccessful: false,
})
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
msg1, err1 := db.GetL2MessageByMsgHash("msg-1")
msg2, err2 := db.GetL2MessageByMsgHash("msg-2")
return err1 == nil && msg1.Status == types.MsgConfirmed &&
err2 == nil && msg2.Status == types.MsgRelayFailed
})
}
func testL2RelayerRollupConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
batches := make([]*types.BatchData, 6)
for i := 0; i < 6; i++ {
batches[i] = genBatchData(t, uint64(i))
}
dbTx, err := db.Beginx()
assert.NoError(t, err)
for _, batch := range batches {
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
}
assert.NoError(t, dbTx.Commit())
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate batch commit and finalization confirmations.
processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"}
isSuccessful := []bool{true, false, true, false}
for i, key := range processingKeys[:2] {
batchHashes := []string{batches[i*2].Hash().Hex(), batches[i*2+1].Hash().Hex()}
l2Relayer.processingBatchesCommitment.Store(key, batchHashes)
l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i],
})
}
for i, key := range processingKeys[2:] {
batchHash := batches[i+4].Hash().Hex()
l2Relayer.processingFinalization.Store(key, batchHash)
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i+2],
TxHash: common.HexToHash("0x56789abcdef1234"),
})
}
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
expectedStatuses := []types.RollupStatus{
types.RollupCommitted,
types.RollupCommitted,
types.RollupCommitFailed,
types.RollupCommitFailed,
types.RollupFinalized,
types.RollupFinalizeFailed,
}
for i, batch := range batches[:6] {
batchInDB, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()})
if err != nil || len(batchInDB) != 1 || batchInDB[0].RollupStatus != expectedStatuses[i] {
return false
}
}
return true
})
}
func testL2RelayerGasOracleConfirm(t *testing.T) {
// Set up the database and defer closing it.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
// Insert test data.
batches := make([]*types.BatchData, 2)
for i := 0; i < 2; i++ {
batches[i] = genBatchData(t, uint64(i))
}
dbTx, err := db.Beginx()
assert.NoError(t, err)
for _, batch := range batches {
assert.NoError(t, db.NewBatchInDBTx(dbTx, batch))
}
assert.NoError(t, dbTx.Commit())
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate gas oracle confirmations.
isSuccessful := []bool{true, false}
for i, batch := range batches {
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ID: batch.Hash().Hex(),
IsSuccessful: isSuccessful[i],
})
}
// Check the database for the updated status using TryTimes.
utils.TryTimes(5, func() bool {
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
for i, batch := range batches {
gasOracle, err := db.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()})
if err != nil || len(gasOracle) != 1 || gasOracle[0].OracleStatus != expectedStatuses[i] {
return false
}
}
return true
})
}
func genBatchData(t *testing.T, index uint64) *types.BatchData {
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
// unmarshal blockTrace
wrappedBlock := &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock)
assert.NoError(t, err)
wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
parentBatch := &types.BlockBatch{
Index: index,
Hash: "0x0000000000000000000000000000000000000000",
}
return types.NewBatchData(parentBatch, []*types.WrappedBlock{wrappedBlock}, nil)
}
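// The helpers above use a lowercase "test" prefix, so `go test` does not run
// them directly; a parent test in this package (where cfg, l2Cli and
// wrappedBlock1 are also prepared) presumably registers them. A hypothetical
// driver, for illustration only:
func TestL2RelayerFunctions(t *testing.T) {
    t.Run("CreateNewRelayer", testCreateNewRelayer)
    t.Run("ProcessSaveEvents", testL2RelayerProcessSaveEvents)
    t.Run("ProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
    t.Run("SkipBatches", testL2RelayerSkipBatches)
    t.Run("MsgConfirm", testL2RelayerMsgConfirm)
    t.Run("RollupConfirm", testL2RelayerRollupConfirm)
    t.Run("GasOracleConfirm", testL2RelayerGasOracleConfirm)
}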

11 bridge/relayer/params.go Normal file
View File

@@ -0,0 +1,11 @@
package relayer
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay
defaultL2MessageRelayMinGasLimit = 200000
)
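// Worked example (assumed usage; the comparison code itself is not in this
// diff): a price difference is scaled by gasPriceDiffPrecision before being
// compared against defaultGasPriceDiff, so 50000 / 1000000 = 5%. A move from
// 2.00 gwei to 2.10 gwei yields 1e8 * 1000000 / 2e9 = 50000, exactly at the
// threshold. The function name is hypothetical.
func exceedsGasPriceDiff(lastPrice, newPrice uint64) bool {
    if lastPrice == 0 {
        return true // first sample, nothing to compare against
    }
    var diff uint64
    if newPrice > lastPrice {
        diff = newPrice - lastPrice
    } else {
        diff = lastPrice - newPrice
    }
    return diff*gasPriceDiffPrecision/lastPrice >= defaultGasPriceDiff
}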

Some files were not shown because too many files have changed in this diff.