Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits (3 commits)
store_proo ... local_disa

| Author | SHA1 | Date |
|---|---|---|
| | d10816de6a | |
| | c3b1713d21 | |
| | 87974b560a | |
14 .github/pull_request_template.md vendored
@@ -1,9 +1,9 @@
### Purpose or design rationale of this PR
## 1. Purpose or design rationale of this PR
*Describe your change. Make sure to answer these three questions: What does this PR do? Why does it do it? How does it do it?*
...
### PR title
## 2. PR title
Your PR title must follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) (as we are doing squash merge for each PR), so it must start with one of the following [types](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#type):
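For illustration (an example of ours, not part of the template): a title following this convention might look like `feat(bridge): add batch deposit event parsing` or `fix(coordinator): handle empty proof submissions`; the scopes and descriptions here are hypothetical.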
@@ -18,17 +18,17 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits
- [ ] test: Adding missing tests or correcting existing tests
### Deployment tag versioning
## 3. Deployment tag versioning
Has `tag` in `common/version.go` been updated?
- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
- [ ] This PR doesn't involve a new deployment, git tag, docker image tag
- [ ] Yes
### Breaking change label
## 4. Breaking change label
Does this PR have the `breaking-change` label?
- [ ] No, this PR is not a breaking change
- [ ] This PR is not a breaking change
- [ ] Yes
96 .github/workflows/bridge.yml vendored
@@ -9,8 +9,6 @@ on:
- alpha
paths:
- 'bridge/**'
- 'common/**'
- 'database/**'
- '.github/workflows/bridge.yml'
pull_request:
types:
@@ -20,61 +18,14 @@ on:
- ready_for_review
paths:
- 'bridge/**'
- 'common/**'
- 'database/**'
- '.github/workflows/bridge.yml'
defaults:
run:
working-directory: 'bridge'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
working-directory: 'bridge'
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
run: goimports -local scroll-tech/bridge/ -w .
working-directory: 'bridge'
- name: Run go mod tidy
run: go mod tidy
working-directory: 'bridge'
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'bridge'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
@@ -92,24 +43,31 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
- name: Lint
run: |
make dev_docker
make -C bridge mock_abi
- name: Build bridge binaries
working-directory: 'bridge'
run: |
make bridge_bins
- name: Test bridge packages
working-directory: 'bridge'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
rm -rf $HOME/.cache/golangci-lint
make mock_abi
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
flags: bridge
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
41 .github/workflows/bridge_history_api.yml vendored
@@ -1,4 +1,4 @@
name: BridgeHistoryAPI
name: BridgeHistoryApi
on:
push:
@@ -25,20 +25,20 @@ defaults:
working-directory: 'bridge-history-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
# check:
#   if: github.event.pull_request.draft == false
#   runs-on: ubuntu-latest
#   steps:
#     - name: Install Go
#       uses: actions/setup-go@v2
#       with:
#         go-version: 1.20.x
#     - name: Checkout code
#       uses: actions/checkout@v2
#     - name: Lint
#       run: |
#         rm -rf $HOME/.cache/golangci-lint
#         make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -46,19 +46,13 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
go get ./...
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: bridge-history-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -66,7 +60,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
@@ -79,3 +73,4 @@ jobs:
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
49 .github/workflows/common.yml vendored
@@ -20,6 +20,10 @@ on:
- 'common/**'
- '.github/workflows/common.yml'
defaults:
run:
working-directory: 'common'
jobs:
check:
if: github.event.pull_request.draft == false
@@ -33,7 +37,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
@@ -41,7 +45,6 @@ jobs:
with:
workspaces: "common/libzkp/impl -> target"
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
@@ -52,52 +55,16 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'common'
run: goimports -local scroll-tech/common/ -w .
- name: Run go mod tidy
working-directory: 'common'
run: go mod tidy
- run: goimports -local scroll-tech/common/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'common'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test common packages
working-directory: 'common'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: common
54 .github/workflows/coordinator.yml vendored
@@ -9,8 +9,6 @@ on:
- alpha
paths:
- 'coordinator/**'
- 'common/**'
- 'database/**'
- '.github/workflows/coordinator.yml'
pull_request:
types:
@@ -20,10 +18,12 @@ on:
- ready_for_review
paths:
- 'coordinator/**'
- 'common/**'
- 'database/**'
- '.github/workflows/coordinator.yml'
defaults:
run:
working-directory: 'coordinator'
jobs:
check:
if: github.event.pull_request.draft == false
@@ -37,11 +37,10 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
working-directory: 'coordinator'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
@@ -52,20 +51,15 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'coordinator'
run: goimports -local scroll-tech/coordinator/ -w .
- name: Run go mod tidy
working-directory: 'coordinator'
run: go mod tidy
- run: goimports -local scroll-tech/coordinator/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'coordinator'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
@@ -86,35 +80,3 @@ jobs:
#     push: false
#     # cache-from: type=gha,scope=${{ github.workflow }}
#     # cache-to: type=gha,scope=${{ github.workflow }}
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: coordinator
51 .github/workflows/database.yml vendored
@@ -9,7 +9,6 @@ on:
- alpha
paths:
- 'database/**'
- 'common/**'
- '.github/workflows/database.yml'
pull_request:
types:
@@ -19,9 +18,12 @@ on:
- ready_for_review
paths:
- 'database/**'
- 'common/**'
- '.github/workflows/database.yml'
defaults:
run:
working-directory: 'database'
jobs:
check:
if: github.event.pull_request.draft == false
@@ -30,11 +32,10 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
working-directory: 'database'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
@@ -45,52 +46,16 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'database'
run: goimports -local scroll-tech/database/ -w .
- name: Run go mod tidy
working-directory: 'database'
run: go mod tidy
- run: goimports -local scroll-tech/database/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'database'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test database packages
working-directory: 'database'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: database
43 .github/workflows/integration.yaml vendored
@@ -1,43 +0,0 @@
name: Integration
on:
push:
branches:
- main
- staging
- develop
- alpha
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
jobs:
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
make -C bridge mock_abi
make -C common/bytecode all
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...
59 .github/workflows/intermediate-docker.yml vendored
@@ -1,59 +0,0 @@
name: Intermediate Docker
on:
workflow_dispatch:
inputs:
GO_VERSION:
description: 'Go version'
required: true
type: string
default: '1.19'
RUST_VERSION:
description: 'Rust toolchain version'
required: true
type: string
default: 'nightly-2022-12-10'
PYTHON_VERSION:
description: 'Python version'
required: false
type: string
default: '3.10'
CUDA_VERSION:
description: 'Cuda version'
required: false
type: string
default: '11.7.1'
defaults:
run:
working-directory: 'build/dockerfiles/intermediate'
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build
run: |
make all
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Publish
run: |
make publish
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
80 .github/workflows/prover_stats_api.yml vendored
@@ -1,80 +0,0 @@
name: ProverStatsAPI
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover-stats-api/**'
- '.github/workflows/prover_stats_api.yml'
defaults:
run:
working-directory: 'prover-stats-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover-stats-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover-stats-api/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
26 .github/workflows/roller.yml vendored
@@ -29,25 +29,6 @@ jobs:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: roller
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
@@ -56,7 +37,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
@@ -66,6 +47,7 @@ jobs:
- name: Test
run: |
make roller
go test -tags="mock_prover" -v ./...
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -73,7 +55,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
@@ -87,7 +69,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.19.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
99 Jenkinsfile vendored Normal file
@@ -0,0 +1,99 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
CHAIN_ID='534353'
// LOG_DOCKER = 'true'
}
stages {
stage('Build') {
parallel {
stage('Build Prerequisite') {
steps {
sh 'make dev_docker'
sh 'make -C bridge mock_abi'
sh 'make -C common/bytecode all'
}
}
stage('Check Bridge Compilation') {
steps {
sh 'make -C bridge bridge_bins'
}
}
stage('Check Coordinator Compilation') {
steps {
sh 'export PATH=/home/ubuntu/go/bin:$PATH'
sh 'make -C coordinator coordinator'
}
}
stage('Check Database Compilation') {
steps {
sh 'make -C database db_cli'
}
}
stage('Check Database Docker Build') {
steps {
sh 'make -C database docker'
}
}
}
}
stage('Parallel Test') {
parallel{
stage('Race test common package') {
steps {
sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic scroll-tech/common/...'
}
}
stage('Race test bridge package') {
steps {
sh "cd ./bridge && ../build/run_tests.sh bridge"
}
}
stage('Race test coordinator package') {
steps {
sh 'cd ./coordinator && go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=../coverage.coordinator.txt -covermode=atomic ./...'
}
}
stage('Race test database package') {
steps {
sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic scroll-tech/database/...'
}
}
stage('Integration test') {
steps {
sh 'go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...'
}
}
}
}
stage('Compare Coverage') {
steps {
sh "./build/post-test-report-coverage.sh"
script {
currentBuild.result = 'SUCCESS'
}
step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
}
}
}
post {
always {
publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
}
}
}
23 Makefile
@@ -1,5 +1,7 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean
ZKP_VERSION=release-1220
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -15,12 +17,12 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.1.0 && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
@@ -38,5 +40,16 @@ build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app scroll_test_image
test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
mkdir -p test_params
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
cd ./roller && make test-gpu-prover
rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
cd ./coordinator && make test-gpu-verifier
clean: ## Empty out the bin folder
@rm -rf build/bin
README.md

@@ -1,9 +1,9 @@
# Scroll Monorepo
[](https://codecov.io/gh/scroll-tech/scroll)
[](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
## Prerequisites
+ Go 1.19
+ Go 1.18
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Hardhat / Foundry
+ Docker
0 assets/.gitkeep Normal file
@@ -39,12 +39,6 @@ var (
L2WithdrawERC721Sig common.Hash
L2WithdrawERC1155Sig common.Hash
// batch nft sigs
L1BatchDepositERC721Sig common.Hash
L1BatchDepositERC1155Sig common.Hash
L2BatchWithdrawERC721Sig common.Hash
L2BatchWithdrawERC1155Sig common.Hash
// scroll mono repo
// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
@@ -122,12 +116,6 @@ func init() {
L2ERC1155GatewayABI, _ = L2ERC1155GatewayMetaData.GetAbi()
L2WithdrawERC1155Sig = L2ERC1155GatewayABI.Events["WithdrawERC1155"].ID
// batch nft events
L1BatchDepositERC721Sig = L1ERC721GatewayABI.Events["BatchDepositERC721"].ID
L1BatchDepositERC1155Sig = L1ERC1155GatewayABI.Events["BatchDepositERC1155"].ID
L2BatchWithdrawERC721Sig = L2ERC721GatewayABI.Events["BatchWithdrawERC721"].ID
L2BatchWithdrawERC1155Sig = L2ERC1155GatewayABI.Events["BatchWithdrawERC1155"].ID
// scroll monorepo
ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
ScrollChainV2ABI, _ = ScrollChainV2MetaData.GetAbi()
@@ -289,23 +277,6 @@ type ERC1155MessageEvent struct {
Amount *big.Int
}
type BatchERC721MessageEvent struct {
L1Token common.Address
L2Token common.Address
From common.Address
To common.Address
TokenIDs []*big.Int
}
type BatchERC1155MessageEvent struct {
L1Token common.Address
L2Token common.Address
From common.Address
To common.Address
TokenIDs []*big.Int
TokenAmounts []*big.Int
}
// scroll monorepo
// L1SentMessageEvent represents a SentMessage event raised by the L1ScrollMessenger contract.
@@ -24,40 +24,26 @@ var (
var database db.OrmFactory
func pong(ctx iris.Context) {
_, err := ctx.WriteString("pong")
if err != nil {
log.Error("failed to write pong", "err", err)
}
ctx.WriteString("pong")
}
func setupQueryByAddressHandler(backendApp *mvc.Application) {
func setupQueryByAddressHandler(backend_app *mvc.Application) {
// Register Dependencies.
backendApp.Register(
backend_app.Register(
database,
service.NewHistoryService,
)
// Register Controllers.
backendApp.Handle(new(controller.QueryAddressController))
backend_app.Handle(new(controller.QueryAddressController))
}
func setupQueryClaimableHandler(backendApp *mvc.Application) {
// Register Dependencies.
backendApp.Register(
func setupQueryByHashHandler(backend_app *mvc.Application) {
backend_app.Register(
database,
service.NewHistoryService,
)
// Register Controllers.
backendApp.Handle(new(controller.QueryClaimableController))
}
func setupQueryByHashHandler(backendApp *mvc.Application) {
backendApp.Register(
database,
service.NewHistoryService,
)
backendApp.Handle(new(controller.QueryHashController))
backend_app.Handle(new(controller.QueryHashController))
}
func init() {
@@ -90,18 +76,13 @@ func action(ctx *cli.Context) error {
if err != nil {
log.Crit("can not connect to database", "err", err)
}
defer func() {
if err = database.Close(); err != nil {
log.Error("failed to close database", "err", err)
}
}()
defer database.Close()
bridgeApp := iris.New()
bridgeApp.UseRouter(corsOptions)
bridgeApp.Get("/ping", pong).Describe("healthcheck")
mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler)
mvc.Configure(bridgeApp.Party("/api/claimable"), setupQueryClaimableHandler)
// TODO: make debug mode configurable
err = bridgeApp.Listen(cfg.Server.HostPort, iris.WithLogLevel("debug"))
@@ -12,8 +12,8 @@ import (
"github.com/urfave/cli/v2"
"bridge-history-api/config"
"bridge-history-api/crossmsg"
"bridge-history-api/crossmsg/messageproof"
"bridge-history-api/cross_msg"
"bridge-history-api/cross_msg/message_proof"
"bridge-history-api/db"
cutils "bridge-history-api/utils"
)
@@ -54,20 +54,15 @@ func action(ctx *cli.Context) error {
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
db, err := db.NewOrmFactory(cfg)
defer func() {
if deferErr := db.Close(); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
defer db.Close()
if err != nil {
log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
}
l1worker := &crossmsg.FetchEventWorker{F: crossmsg.L1FetchAndSaveEvents, G: crossmsg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
l1worker := &cross_msg.FetchEventWorker{F: cross_msg.L1FetchAndSaveEvents, G: cross_msg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
l2worker := &crossmsg.FetchEventWorker{F: crossmsg.L2FetchAndSaveEvents, G: crossmsg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}
l2worker := &cross_msg.FetchEventWorker{F: cross_msg.L2FetchAndSaveEvents, G: cross_msg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}
l1AddressList := []common.Address{
common.HexToAddress(cfg.L1.CustomERC20GatewayAddr),
@@ -89,7 +84,7 @@ func action(ctx *cli.Context) error {
common.HexToAddress(cfg.L2.WETHGatewayAddr),
}
l1crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, crossmsg.L1ReorgHandling)
l1crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, cross_msg.L1ReorgHandling)
if err != nil {
log.Crit("failed to create l1 cross message fetcher", "error", err)
}
@@ -97,7 +92,7 @@ func action(ctx *cli.Context) error {
go l1crossMsgFetcher.Start()
defer l1crossMsgFetcher.Stop()
l2crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, crossmsg.L2ReorgHandling)
l2crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, cross_msg.L2ReorgHandling)
if err != nil {
log.Crit("failed to create l2 cross message fetcher", "error", err)
}
@@ -106,17 +101,17 @@ func action(ctx *cli.Context) error {
defer l2crossMsgFetcher.Stop()
// BlockTimestamp fetcher for l1 and l2
l1BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
l1BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
go l1BlockTimeFetcher.Start()
defer l1BlockTimeFetcher.Stop()
l2BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
l2BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
go l2BlockTimeFetcher.Start()
defer l2BlockTimeFetcher.Stop()
// Proof updater and batch fetcher
l2msgProofUpdater := messageproof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
batchFetcher := crossmsg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
l2msgProofUpdater := message_proof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
batchFetcher := cross_msg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
go batchFetcher.Start()
defer batchFetcher.Stop()
@@ -6,7 +6,6 @@ import (
"path/filepath"
)
// BatchInfoFetcherConfig is the configuration of BatchInfoFetcher
type BatchInfoFetcherConfig struct {
BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"`
ScrollChainAddr string `json:"ScrollChainAddr"`
@@ -22,7 +21,6 @@ type DBConfig struct {
MaxIdleNum int `json:"maxIdleNum"`
}
// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct {
Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"`
@@ -37,7 +35,6 @@ type LayerConfig struct {
CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
}
// ServerConfig is the configuration of the bridge history backend server port
type ServerConfig struct {
HostPort string `json:"hostPort"`
}
@@ -7,36 +7,14 @@ import (
"github.com/ethereum/go-ethereum/common"
)
// QueryAddressController contains the query by address service
type QueryAddressController struct {
Service service.HistoryService
}
// QueryHashController contains the query by hash service
type QueryHashController struct {
Service service.HistoryService
}
// QueryClaimableController contains the query claimable txs service
type QueryClaimableController struct {
Service service.HistoryService
}
// Get defines the http get method behavior for QueryClaimableController
func (c *QueryClaimableController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
txs, total, err := c.Service.GetClaimableTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil {
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByAddressResponse{Message: "ok",
Data: &model.Data{
Result: txs,
Total: total,
}}, nil
}
// Get defines the http get method behavior for QueryAddressController
func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil {
@@ -50,7 +28,6 @@ func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.Qu
}}, nil
}
// Post defines the http post method behavior for QueryHashController
func (c *QueryHashController) Post(req model.QueryByHashRequest) (*model.QueryByHashResponse, error) {
result, err := c.Service.GetTxsByHashes(req.Txs)
if err != nil {
@@ -1,4 +1,4 @@
package crossmsg
package cross_msg
import (
"context"
@@ -8,12 +8,11 @@ import (
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"bridge-history-api/crossmsg/messageproof"
"bridge-history-api/cross_msg/message_proof"
"bridge-history-api/db"
"bridge-history-api/utils"
)
// BatchInfoFetcher fetches batch info from l1 chain and update db
type BatchInfoFetcher struct {
ctx context.Context
scrollChainAddr common.Address
@@ -22,11 +21,10 @@ type BatchInfoFetcher struct {
blockTimeInSec int
client *ethclient.Client
db db.OrmFactory
msgProofUpdater *messageproof.MsgProofUpdater
msgProofUpdater *message_proof.MsgProofUpdater
}
// NewBatchInfoFetcher creates a new BatchInfoFetcher instance
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *messageproof.MsgProofUpdater) *BatchInfoFetcher {
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *message_proof.MsgProofUpdater) *BatchInfoFetcher {
return &BatchInfoFetcher{
ctx: ctx,
scrollChainAddr: scrollChainAddr,
@@ -39,14 +37,13 @@ func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, ba
}
}
// Start the BatchInfoFetcher
func (b *BatchInfoFetcher) Start() {
log.Info("BatchInfoFetcher Start")
// Fetch batch info at beginning
// Then start msg proof updater after db have some bridge batch
err := b.fetchBatchInfo()
if err != nil {
log.Error("fetch batch info at beginning failed: ", "err", err)
log.Error("fetch batch info at begining failed: ", "err", err)
}
go b.msgProofUpdater.Start()
@@ -68,7 +65,6 @@ func (b *BatchInfoFetcher) Start() {
}()
}
// Stop the BatchInfoFetcher and call msg proof updater to stop
func (b *BatchInfoFetcher) Stop() {
log.Info("BatchInfoFetcher Stop")
b.msgProofUpdater.Stop()
@@ -1,4 +1,4 @@
package crossmsg
package cross_msg
import (
"context"
@@ -9,13 +9,9 @@ import (
"github.com/ethereum/go-ethereum/log"
)
// GetEarliestNoBlockTimestampHeightFunc is a function type that gets the earliest record without block timestamp from database
type GetEarliestNoBlockTimestampHeightFunc func() (uint64, error)
// UpdateBlockTimestampFunc is a function type that updates block timestamp into database
type UpdateBlockTimestampFunc func(height uint64, timestamp time.Time) error
// BlockTimestampFetcher fetches block timestamp from blockchain and saves them to database
type BlockTimestampFetcher struct {
ctx context.Context
confirmation uint64
@@ -25,7 +21,6 @@ type BlockTimestampFetcher struct {
getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc
}
// NewBlockTimestampFetcher creates a new BlockTimestampFetcher instance
func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher {
return &BlockTimestampFetcher{
ctx: ctx,
@@ -37,7 +32,6 @@ func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTim
}
}
// Start the BlockTimestampFetcher
func (b *BlockTimestampFetcher) Start() {
go func() {
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
@@ -79,7 +73,6 @@ func (b *BlockTimestampFetcher) Start() {
}()
}
// Stop the BlockTimestampFetcher and log the info
func (b *BlockTimestampFetcher) Stop() {
log.Info("BlockTimestampFetcher Stop")
}
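The Start/Stop pair above follows a standard Go polling shape: a goroutine owns a time.Ticker keyed to the chain's block time and exits on context cancellation. A minimal, self-contained sketch of that pattern (names ours; the real fetcher additionally applies the confirmation depth and writes timestamps through the injected callbacks):

package main

import (
	"context"
	"log"
	"time"
)

// poll runs step once per block time until ctx is cancelled.
// Sketch only: BlockTimestampFetcher.Start wires its ticker the same way,
// but its step queries headers and calls the update/get callbacks.
func poll(ctx context.Context, blockTimeInSec int, step func() error) {
	go func() {
		tick := time.NewTicker(time.Duration(blockTimeInSec) * time.Second)
		defer tick.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-tick.C:
				if err := step(); err != nil {
					log.Printf("poll step failed: %v", err) // keep ticking on errors
				}
			}
		}
	}()
}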
@@ -1,4 +1,4 @@
package crossmsg
package cross_msg
import (
"context"
@@ -18,8 +18,7 @@ import (
"bridge-history-api/utils"
)
// MsgFetcher fetches cross message events from blockchain and saves them to database
type MsgFetcher struct {
type CrossMsgFetcher struct {
ctx context.Context
config *config.LayerConfig
db db.OrmFactory
@@ -33,9 +32,8 @@ type MsgFetcher struct {
reorgEndCh chan struct{}
}
// NewMsgFetcher creates a new MsgFetcher instance
func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*MsgFetcher, error) {
msgFetcher := &MsgFetcher{
func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*CrossMsgFetcher, error) {
crossMsgFetcher := &CrossMsgFetcher{
ctx: ctx,
config: config,
db: db,
@@ -47,12 +45,11 @@ func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFac
reorgStartCh: make(chan struct{}),
reorgEndCh: make(chan struct{}),
}
return msgFetcher, nil
return crossMsgFetcher, nil
}
// Start the MsgFetcher
func (c *MsgFetcher) Start() {
log.Info("MsgFetcher Start")
func (c *CrossMsgFetcher) Start() {
log.Info("CrossMsgFetcher Start")
// fetch missing events from finalized blocks, we don't handle reorgs here
c.forwardFetchAndSaveMissingEvents(c.config.Confirmation)
@@ -78,7 +75,7 @@ func (c *MsgFetcher) Start() {
return
case <-tick.C:
c.mu.Lock()
c.forwardFetchAndSaveMissingEvents(1)
c.forwardFetchAndSaveMissingEvents(0)
c.mu.Unlock()
}
}
@@ -97,13 +94,12 @@ func (c *MsgFetcher) Start() {
}()
}
// Stop the MsgFetcher and log the info
func (c *MsgFetcher) Stop() {
log.Info("MsgFetcher Stop")
func (c *CrossMsgFetcher) Stop() {
log.Info("CrossMsgFetcher Stop")
}
// forwardFetchAndSaveMissingEvents will fetch all events from the latest processed height to the latest block number.
func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
// if we fetch to the latest block, shall not exceed cachedHeaders
var number uint64
var err error
@@ -128,7 +124,7 @@ func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
if processedHeight <= 0 || processedHeight < int64(c.config.StartHeight) {
processedHeight = int64(c.config.StartHeight)
} else {
processedHeight++
processedHeight += 1
}
for from := processedHeight; from <= int64(number); from += fetchLimit {
to := from + fetchLimit - 1
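The loop above pages through the backlog in fixed-size windows. A small sketch of the window arithmetic (the fetchLimit value and the clamp of `to` to the safe head are our assumptions; the diff only shows the loop header):

package main

// fetchLimit is hypothetical here; the real limit is defined elsewhere in the package.
const fetchLimit int64 = 500

// windows splits [processedHeight, safeHead] into inclusive ranges of at most
// fetchLimit blocks, mirroring the loop in forwardFetchAndSaveMissingEvents.
func windows(processedHeight, safeHead int64) [][2]int64 {
	var out [][2]int64
	for from := processedHeight; from <= safeHead; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > safeHead {
			to = safeHead // never query past the confirmed head
		}
		out = append(out, [2]int64{from, to})
	}
	return out
}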
@@ -143,7 +139,7 @@ func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
}
}
func (c *MsgFetcher) fetchMissingLatestHeaders() {
func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
var start int64
number, err := c.client.BlockNumber(c.ctx)
if err != nil {
@@ -163,7 +159,7 @@ func (c *MsgFetcher) fetchMissingLatestHeaders() {
close(c.reorgEndCh)
return
default:
header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(i))
header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(i)))
if err != nil {
log.Error("failed to get latest header", "err", err)
return
@@ -185,9 +181,9 @@ func (c *MsgFetcher) fetchMissingLatestHeaders() {
c.mu.Lock()
index, ok, validHeaders := BackwardFindReorgBlock(c.ctx, c.cachedHeaders, c.client, header)
if !ok {
log.Error("Reorg happened too earlier than cached headers", "reorg height", header.Number)
num, getSafeErr := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
if getSafeErr != nil {
log.Error("Reorg happended too earlier than cached headers", "reorg height", header.Number)
num, err := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
if err != nil {
log.Crit("Can not get safe number during reorg, quit the process", "err", err)
}
// clear all our saved data, because no data is safe now
@@ -1,4 +1,4 @@
package crossmsg
package cross_msg
import (
"context"
@@ -8,6 +8,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
backendabi "bridge-history-api/abi"
"bridge-history-api/db"
@@ -25,15 +26,14 @@ type FetchAndSave func(ctx context.Context, client *ethclient.Client, database d
// GetLatestProcessed is a function type that gets the latest processed block height from database
type GetLatestProcessed func(db db.OrmFactory) (int64, error)
type UpdateXHash func(ctx context.Context)
// FetchEventWorker defines worker with fetch and save function, processed number getter, and name
type FetchEventWorker struct {
F FetchAndSave
G GetLatestProcessed
Name string
}
// GetLatestL1ProcessedHeight get L1 the latest processed height
func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL1ProcessedHeight()
if err != nil {
@@ -47,11 +47,11 @@ func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
}
if crossHeight > relayedHeight {
return crossHeight, nil
} else {
return relayedHeight, nil
}
return relayedHeight, nil
}
// GetLatestL2ProcessedHeight get L2 latest processed height
func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL2ProcessedHeight()
if err != nil {
@@ -78,7 +78,6 @@ func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
return maxHeight, nil
}
// L1FetchAndSaveEvents fetch and save events on L1
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
@@ -100,7 +99,7 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Warn("Failed to get l1 event logs", "err", err)
return err
}
depositL1CrossMsgs, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
depositL1CrossMsgs, msgHashes, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
if err != nil {
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
@@ -112,25 +111,24 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
}
err = database.BatchInsertL1CrossMsgDBTx(dbTx, depositL1CrossMsgs)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
}
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
err = updateL1CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
if err != nil {
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to update msgHash in L1 cross msg", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Error("l1FetchAndSaveEvents: Failed to commit db transaction", "err", err)
return err
}
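Both sides of this hunk repeat the same transaction hygiene: on a failed insert or commit, roll the transaction back, and (in the longer form) log the rollback error rather than discarding it. A helper of our own devising that captures the repeated two lines:

package crossmsg // assumed package name, following the diff

import (
	"github.com/ethereum/go-ethereum/log"
	"github.com/jmoiron/sqlx"
)

// rollbackAndLog rolls back a failed transaction and logs any rollback error
// instead of dropping it. Illustrative only: the repo inlines this pattern
// at each call site rather than defining such a helper.
func rollbackAndLog(dbTx *sqlx.Tx) {
	if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
		log.Error("dbTx Rollback failed", "err", rollBackErr)
	}
}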
@@ -138,7 +136,6 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
return nil
}
// L2FetchAndSaveEvents fetche and save events on L2
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
@@ -160,12 +157,11 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
log.Warn("Failed to get l2 event logs", "err", err)
return err
}
depositL2CrossMsgs, relayedMsg, l2SentMsgs, err := utils.ParseBackendL2EventLogs(logs)
depositL2CrossMsgs, msgHashes, relayedMsg, l2sentMsgs, err := utils.ParseBackendL2EventLogs(logs)
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
@@ -173,34 +169,32 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
}
err = database.BatchInsertL2CrossMsgDBTx(dbTx, depositL2CrossMsgs)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
}
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2SentMsgs)
err = updateL2CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to update msgHash in L2 cross msg", "err", err)
}
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2sentMsgs)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Error("l2FetchAndSaveEvents: Failed to commit db transaction", "err", err)
return err
}
@@ -208,7 +202,6 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
return nil
}
// FetchAndSaveBatchIndex fetche and save batch index
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, scrollChainAddr common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
@@ -235,19 +228,37 @@ func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, datab
}
err = database.BatchInsertRollupBatchDBTx(dbTx, rollupBatches)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Error("FetchAndSaveBatchIndex: Failed to commit db transaction", "err", err)
return err
}
return nil
}
func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}
func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
|
||||
for _, msgHash := range msgHashes {
|
||||
err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
|
||||
if err != nil {
|
||||
log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.TxHash, "err", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
package messageproof
package message_proof

import (
"context"
@@ -14,14 +12,12 @@ import (
"bridge-history-api/db/orm"
)

// MsgProofUpdater is used to update message proof in db
type MsgProofUpdater struct {
ctx context.Context
db db.OrmFactory
withdrawTrie *WithdrawTrie
}

// NewMsgProofUpdater new MsgProofUpdater instance
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db db.OrmFactory) *MsgProofUpdater {
return &MsgProofUpdater{
ctx: ctx,
@@ -30,7 +28,6 @@ func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock ui
}
}

// Start the MsgProofUpdater
func (m *MsgProofUpdater) Start() {
log.Info("MsgProofUpdater Start")
m.initialize(m.ctx)
@@ -86,7 +83,6 @@ func (m *MsgProofUpdater) Start() {

}

// Stop the MsgProofUpdater
func (m *MsgProofUpdater) Stop() {
log.Info("MsgProofUpdater Stop")
}
@@ -116,7 +112,7 @@ func (m *MsgProofUpdater) initializeWithdrawTrie() error {
return fmt.Errorf("failed to get first l2 message: %v", err)
}
// no l2 message
// TO DO: check if we really dont have l2 sent message with nonce 0
// TO DO: check if we realy dont have l2 sent message with nonce 0
if firstMsg == nil {
log.Info("No first l2sentmsg in db")
return nil
@@ -187,7 +183,7 @@ func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte,
if len(msgs) == 0 {
return nil
}
// this should not happen, but double check
// this should not happend, but double checked
if len(msgs) != len(proofs) {
return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
}
@@ -1,4 +1,4 @@
package messageproof
package message_proof

import (
"github.com/ethereum/go-ethereum/common"
@@ -1,4 +1,4 @@
package messageproof
package message_proof

import (
"math/big"
@@ -1,4 +1,4 @@
package crossmsg_test
package cross_msg_test

import (
"crypto/rand"
@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/assert"

"bridge-history-api/crossmsg"
"bridge-history-api/cross_msg"
)

func TestMergeIntoList(t *testing.T) {
@@ -18,7 +18,7 @@ func TestMergeIntoList(t *testing.T) {
assert.Equal(t, headers[0].Hash(), headers[1].ParentHash)
headers2, err := generateHeaders(18)
assert.NoError(t, err)
result := crossmsg.MergeAddIntoHeaderList(headers, headers2, 64)
result := cross_msg.MergeAddIntoHeaderList(headers, headers2, 64)
assert.Equal(t, 64, len(result))
assert.Equal(t, headers2[len(headers2)-1], result[len(result)-1])
assert.NotEqual(t, headers[0], result[0])
@@ -53,7 +53,7 @@ func generateHeaders(amount int) ([]*types.Header, error) {
Time: uint64(i * 15),
Extra: []byte{},
MixDigest: common.Hash{},
Nonce: types.EncodeNonce(nonce.Uint64()),
Nonce: types.EncodeNonce(uint64(nonce.Uint64())),
}
headers[i] = header
}
@@ -1,4 +1,4 @@
package crossmsg
package cross_msg

import (
"context"
@@ -10,7 +10,6 @@ import (
"bridge-history-api/db"
)

// ReorgHandling is the reorg handler function type
type ReorgHandling func(ctx context.Context, reorgHeight int64, db db.OrmFactory) error

func reverseArray(arr []*types.Header) []*types.Header {
@@ -21,12 +20,10 @@ func reverseArray(arr []*types.Header) []*types.Header {
return arr
}

// IsParentAndChild matches the child header's ParentHash with the parent header's Hash
func IsParentAndChild(parentHeader *types.Header, header *types.Header) bool {
return header.ParentHash == parentHeader.Hash()
}

// MergeAddIntoHeaderList merges two header lists; if the merged list exceeds the max length, the oldest entries are dropped
func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []*types.Header {
mergedArr := append(baseArr, extraArr...)
if len(mergedArr) <= maxLength {
@@ -37,12 +34,11 @@ func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []
return mergedArr[startIndex:]
}

// BackwardFindReorgBlock finds the reorg block by backward search
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, lastHeader *types.Header) (int, bool, []*types.Header) {
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, header *types.Header) (int, bool, []*types.Header) {
maxStep := len(headers)
backwardHeaderList := []*types.Header{lastHeader}
backwardHeaderList := []*types.Header{header}
for iterRound := 0; iterRound < maxStep; iterRound++ {
header, err := client.HeaderByHash(ctx, lastHeader.ParentHash)
header, err := client.HeaderByHash(ctx, header.ParentHash)
if err != nil {
log.Error("BackwardFindReorgBlock failed", "error", err)
return -1, false, nil
@@ -54,12 +50,10 @@ func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client
return j, true, backwardHeaderList
}
}
lastHeader = header
}
return -1, false, nil
}

// L1ReorgHandling handles l1 reorg
func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
dbTx, err := db.Beginx()
if err != nil {
@@ -67,64 +61,47 @@ func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) e
}
err = db.DeleteL1CrossMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("delete l1 cross msg from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL1RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("delete l1 relayed hash from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Error("commit tx failed", "err", err)
return err
}
return nil
}

// L2ReorgHandling handles l2 reorg
func L2ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
dbTx, err := db.Beginx()
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("begin db tx failed", "err", err)
}
err = db.DeleteL2CrossMsgFromHeightDBTx(dbTx, reorgHeight)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("delete l2 cross msg from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("delete l2 relayed hash from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2SentMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Crit("delete l2 sent msg from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
if rollBackErr := dbTx.Rollback(); rollBackErr != nil {
log.Error("dbTx Rollback failed", "err", rollBackErr)
}
dbTx.Rollback()
log.Error("commit tx failed", "err", err)
return err
}
@@ -3,7 +3,7 @@
create table cross_message
(
id BIGSERIAL PRIMARY KEY,
msg_hash VARCHAR NOT NULL,
msg_hash VARCHAR NOT NULL DEFAULT '',
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
@@ -12,30 +12,30 @@ create table cross_message
layer2_hash VARCHAR NOT NULL DEFAULT '',
layer1_token VARCHAR NOT NULL DEFAULT '',
layer2_token VARCHAR NOT NULL DEFAULT '',
token_id BIGINT NOT NULL DEFAULT 0,
asset SMALLINT NOT NULL,
msg_type SMALLINT NOT NULL,
token_ids TEXT NOT NULL DEFAULT '',
token_amounts TEXT NOT NULL DEFAULT '',
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
block_timestamp TIMESTAMP(0) DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);

create unique index uk_msg_hash_msg_type
on cross_message (msg_hash, msg_type) where deleted_at IS NULL;

comment
on column cross_message.asset is 'ETH, ERC20, ERC721, ERC1155';

comment
on column cross_message.msg_type is 'unknown, l1msg, l2msg';

CREATE INDEX idx_l1_msg_index ON cross_message (layer1_hash, deleted_at);
comment
on column cross_message.is_deleted is 'NotDeleted false, Deleted true';

CREATE INDEX idx_l2_msg_index ON cross_message (layer2_hash, deleted_at);
CREATE INDEX valid_l1_msg_index ON cross_message (layer1_hash, is_deleted);

CREATE INDEX idx_height_msg_type_index ON cross_message (height, msg_type, deleted_at);
CREATE INDEX valid_l2_msg_index ON cross_message (layer2_hash, is_deleted);

CREATE INDEX valid_height_index ON cross_message (height, msg_type, is_deleted);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
@@ -49,6 +49,22 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON cross_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE cross_message SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON cross_message
FOR EACH ROW
EXECUTE FUNCTION deleted_at_trigger();


-- +goose StatementEnd

-- +goose Down

@@ -7,19 +7,17 @@ create table relayed_msg
height BIGINT NOT NULL,
layer1_hash VARCHAR NOT NULL DEFAULT '',
layer2_hash VARCHAR NOT NULL DEFAULT '',
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);

create unique index uk_msg_hash_l1_hash_l2_hash
on relayed_msg (msg_hash, layer1_hash, layer2_hash) where deleted_at IS NULL;
comment
on column relayed_msg.is_deleted is 'NotDeleted, Deleted';

CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);

CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);

CREATE INDEX idx_msg_hash_deleted_at_relayed_msg on relayed_msg (msg_hash, deleted_at);
create unique index relayed_msg_hash_uindex
on relayed_msg (msg_hash);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
@@ -33,6 +31,22 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE relayed_msg SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON relayed_msg
FOR EACH ROW
EXECUTE FUNCTION deleted_at_trigger();


-- +goose StatementEnd

-- +goose Down

@@ -3,8 +3,6 @@
create table l2_sent_msg
(
id BIGSERIAL PRIMARY KEY,
original_sender VARCHAR NOT NULL DEFAULT '',
tx_hash VARCHAR NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
@@ -14,18 +12,14 @@ create table l2_sent_msg
batch_index BIGINT NOT NULL DEFAULT 0,
msg_proof TEXT NOT NULL DEFAULT '',
msg_data TEXT NOT NULL DEFAULT '',
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);

create unique index uk_msg_hash
on l2_sent_msg (msg_hash) where deleted_at IS NULL;

create unique index uk_nonce
on l2_sent_msg (nonce) where deleted_at IS NULL;

CREATE INDEX idx_msg_hash_deleted_at_l2_sent_msg on l2_sent_msg (msg_hash, deleted_at);
comment
on column l2_sent_msg.is_deleted is 'NotDeleted, Deleted';

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
@@ -39,6 +33,22 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_sent_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE l2_sent_msg SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON l2_sent_msg
FOR EACH ROW
EXECUTE FUNCTION deleted_at_trigger();


-- +goose StatementEnd

-- +goose Down

@@ -8,17 +8,12 @@ create table rollup_batch
start_block_number BIGINT NOT NULL,
end_block_number BIGINT NOT NULL,
batch_hash VARCHAR NOT NULL,
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);

create unique index uk_batch_index
on rollup_batch (batch_index) where deleted_at IS NULL;

create unique index uk_batch_hash
on rollup_batch (batch_hash) where deleted_at IS NULL;

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
@@ -31,6 +26,21 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON rollup_batch FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE rollup_batch SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON rollup_batch
FOR EACH ROW
EXECUTE FUNCTION deleted_at_trigger();

-- +goose StatementEnd

-- +goose Down

@@ -2,6 +2,7 @@ package orm

import (
"database/sql"
"fmt"

"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
@@ -11,7 +12,6 @@ type rollupBatchOrm struct {
db *sqlx.DB
}

// RollupBatch is the struct for rollup_batch table
type RollupBatch struct {
ID uint64 `json:"id" db:"id"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
@@ -40,6 +40,14 @@ func (b *rollupBatchOrm) BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, batches []*Ro
"start_block_number": batch.StartBlockNumber,
"end_block_number": batch.EndBlockNumber,
}
var exists bool
err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM rollup_batch WHERE batch_index = $1 AND NOT is_deleted)`, batch.BatchIndex).Scan(&exists)
if err != nil {
return err
}
if exists {
return fmt.Errorf("BatchInsertRollupBatchDBTx: batch index %v already exists at height %v", batch.BatchIndex, batch.CommitHeight)
}
}
_, err = dbTx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number) values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
if err != nil {

@@ -8,10 +8,7 @@ import (
"github.com/jmoiron/sqlx"
)

// AssetType can be ETH/ERC20/ERC1155/ERC721
type AssetType int

// MsgType can be layer1/layer2 msg
type MsgType int

func (a AssetType) String() string {
@@ -29,45 +26,45 @@ func (a AssetType) String() string {
}

const (
// ETH = 0
ETH AssetType = iota
// ERC20 = 1
ERC20
// ERC721 = 2
ERC721
// ERC1155 = 3
ERC1155
)

const (
// UnknownMsg = 0
UnknownMsg MsgType = iota
// Layer1Msg = 1
Layer1Msg
// Layer2Msg = 2
Layer2Msg
)

// CrossMsg represents a cross message from layer 1 to layer 2
type CrossMsg struct {
ID uint64 `json:"id" db:"id"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Amount string `json:"amount" db:"amount"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Token string `json:"layer1_token" db:"layer1_token"`
Layer2Token string `json:"layer2_token" db:"layer2_token"`
TokenIDs string `json:"token_ids" db:"token_ids"`
TokenAmounts string `json:"token_amounts" db:"token_amounts"`
Asset int `json:"asset" db:"asset"`
MsgType int `json:"msg_type" db:"msg_type"`
Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
ID uint64 `json:"id" db:"id"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Amount string `json:"amount" db:"amount"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Token string `json:"layer1_token" db:"layer1_token"`
Layer2Token string `json:"layer2_token" db:"layer2_token"`
TokenID uint64 `json:"token_id" db:"token_id"`
Asset int `json:"asset" db:"asset"`
MsgType int `json:"msg_type" db:"msg_type"`
IsDeleted bool `json:"is_deleted" db:"is_deleted"`
Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}

type RelayedMsg struct {
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
}

// L1CrossMsgOrm provides operations on l1_cross_message table
@@ -96,20 +93,17 @@ type L2CrossMsgOrm interface {
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlockTimestampHeight() (uint64, error)
GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error)
}

// RelayedMsgOrm provides operations on relayed_msg table
type RelayedMsgOrm interface {
BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error
GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error)
GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error)
GetLatestRelayedHeightOnL1() (int64, error)
GetLatestRelayedHeightOnL2() (int64, error)
DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}

// L2SentMsgOrm provides operations on l2_sent_msg table
type L2SentMsgOrm interface {
BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error
GetL2SentMsgByHash(l2Hash string) (*L2SentMsg, error)
@@ -117,14 +111,11 @@ type L2SentMsgOrm interface {
GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)
GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error)
GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error)
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batchIndex uint64) error
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error
GetLatestL2SentMsgBatchIndex() (int64, error)
GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error)
GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error)
DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}

// RollupBatchOrm provides operations on rollup_batch table
type RollupBatchOrm interface {
GetLatestRollupBatch() (*RollupBatch, error)
GetRollupBatchByIndex(index uint64) (*RollupBatch, error)

@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"errors"
"fmt"
"time"

"github.com/ethereum/go-ethereum/common"
@@ -22,7 +23,7 @@ func NewL1CrossMsgOrm(db *sqlx.DB) L1CrossMsgOrm {

func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND deleted_at IS NULL;`, l1Hash.String(), Layer1Msg)
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND NOT is_deleted;`, l1Hash.String(), Layer1Msg)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -36,15 +37,8 @@ func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, erro
// Warning: return empty slice if no data found
func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND deleted_at IS NULL;`, sender.String(), Layer1Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = 1 AND NOT is_deleted;`, sender.String(), Layer1Msg)

for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
@@ -72,15 +66,22 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"target": msg.Target,
"amount": msg.Amount,
"asset": msg.Asset,
"msg_hash": msg.MsgHash,
"layer1_hash": msg.Layer1Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
"token_ids": msg.TokenIDs,
"token_id": msg.TokenID,
"msg_type": Layer1Msg,
}
var exists bool
err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM cross_message WHERE layer1_hash = $1 AND NOT is_deleted)`, msg.Layer1Hash).Scan(&exists)
if err != nil {
return err
}
if exists {
return fmt.Errorf("BatchInsertL1CrossMsgDBTx: l1 cross msg layer1Hash %v already exists at height %v", msg.Layer1Hash, msg.Height)
}
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, amount, asset, msg_hash, layer1_hash, layer1_token, layer2_token, token_ids, msg_type) values(:height, :sender, :target, :amount, :asset, :msg_hash, :layer1_hash, :layer1_token, :layer2_token, :token_ids, :msg_type);`, messageMaps)
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_id, amount, msg_type) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type);`, messageMaps)
if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
return err
@@ -91,7 +92,7 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro

// UpdateL1CrossMsgHashDBTx update l1 cross msg hash in db, no need to check msg_type since layer1_hash wont be empty if its layer1 msg
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
return err
}
return nil
@@ -99,7 +100,7 @@ func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx
}

func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l1Hash.String()); err != nil {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
return err
}
return nil
@@ -107,7 +108,7 @@ func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHas
}

func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer1Msg)
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer1Msg)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -122,21 +123,21 @@ func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
}

func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
if _, err := l.db.Exec(`UPDATE cross_message SET deleted_at = current_timestamp WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
if _, err := l.db.Exec(`UPDATE cross_message SET is_deleted = true WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
return err
}
return nil
}

func (l *l1CrossMsgOrm) UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer1Msg); err != nil {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer1Msg); err != nil {
return err
}
return nil
}

func (l *l1CrossMsgOrm) GetL1EarliestNoBlockTimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer1Msg)
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer1Msg)
var result uint64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows {

@@ -4,12 +4,12 @@ import (
"context"
"database/sql"
"errors"
"fmt"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
)

type l2CrossMsgOrm struct {
@@ -23,7 +23,7 @@ func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {

func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND deleted_at IS NULL;`, l2Hash.String())
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -37,15 +37,8 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, erro
// Warning: return empty slice if no data found
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND deleted_at IS NULL;`, sender.String(), Layer2Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND NOT is_deleted;`, sender.String(), Layer2Msg)

for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
@@ -63,7 +56,7 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossM
}

func (l *l2CrossMsgOrm) DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE cross_message SET deleted_at = current_timestamp where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
_, err := dbTx.Exec(`UPDATE cross_message SET is_deleted = true where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
if err != nil {
log.Error("DeleteL1CrossMsgAfterHeightDBTx: failed to delete", "height", height, "err", err)
return err
@@ -79,21 +72,29 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {

messageMaps[i] = map[string]interface{}{
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"asset": msg.Asset,
"msg_hash": msg.MsgHash,
"layer2_hash": msg.Layer2Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
"token_ids": msg.TokenIDs,
"token_id": msg.TokenID,
"amount": msg.Amount,
"msg_type": Layer2Msg,
}
var exists bool
err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted)`, msg.Layer2Hash).Scan(&exists)
if err != nil {
return err
}
if exists {
return fmt.Errorf("BatchInsertL2CrossMsgDBTx: l2 cross msg layer2Hash %v already exists at height %v", msg.Layer2Hash, msg.Height)
}
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, msg_hash, layer2_hash, layer1_token, layer2_token, token_ids, amount, msg_type) values(:height, :sender, :target, :asset, :msg_hash, :layer2_hash, :layer1_token, :layer2_token, :token_ids, :amount, :msg_type);`, messageMaps)
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_id, amount, msg_type) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
return err
@@ -102,21 +103,21 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
}

func (l *l2CrossMsgOrm) UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
return err
}
return nil
}

func (l *l2CrossMsgOrm) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update cross_message set msg_hash = ? where layer2_hash = ? AND deleted_at IS NULL;"), msgHash.String(), l2Hash.String()); err != nil {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
return err
}
return nil
}

func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND deleted_at IS NULL ORDER BY id DESC LIMIT 1;`, Layer2Msg)
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer2Msg)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -131,14 +132,14 @@ func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
}

func (l *l2CrossMsgOrm) UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND deleted_at IS NULL`, timestamp, height, Layer2Msg); err != nil {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer2Msg); err != nil {
return err
}
return nil
}

func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND deleted_at IS NULL ORDER BY height ASC LIMIT 1;`, Layer2Msg)
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer2Msg)
var result uint64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows {
@@ -148,30 +149,3 @@ func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
}
return result, nil
}

func (l *l2CrossMsgOrm) GetL2CrossMsgByMsgHashList(msgHashList []string) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE msg_hash = ANY($1) AND msg_type = $2 AND deleted_at IS NULL;`, pq.Array(msgHashList), Layer2Msg)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
}
if len(results) == 0 {
log.Debug("no L2CrossMsg under given msg hashes", "msg hash list", msgHashList)
}
return results, nil
}

@@ -3,29 +3,28 @@ package orm
import (
"context"
"database/sql"
"fmt"
"time"

"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)

// L2SentMsg defines the struct for l2_sent_msg table record
type L2SentMsg struct {
ID uint64 `json:"id" db:"id"`
OriginalSender string `json:"original_sender" db:"original_sender"`
TxHash string `json:"tx_hash" db:"tx_hash"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Value string `json:"value" db:"value"`
Height uint64 `json:"height" db:"height"`
Nonce uint64 `json:"nonce" db:"nonce"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
MsgProof string `json:"msg_proof" db:"msg_proof"`
MsgData string `json:"msg_data" db:"msg_data"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
ID uint64 `json:"id" db:"id"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Value string `json:"value" db:"value"`
Height uint64 `json:"height" db:"height"`
Nonce uint64 `json:"nonce" db:"nonce"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
MsgProof string `json:"msg_proof" db:"msg_proof"`
MsgData string `json:"msg_data" db:"msg_data"`
IsDeleted bool `json:"is_deleted" db:"is_deleted"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}

type l2SentMsgOrm struct {
@@ -39,7 +38,7 @@ func NewL2SentMsgOrm(db *sqlx.DB) L2SentMsgOrm {

func (l *l2SentMsgOrm) GetL2SentMsgByHash(msgHash string) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msgHash)
if err := row.StructScan(result); err != nil {
return nil, err
}
@@ -54,29 +53,35 @@ func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2Sen
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"original_sender": msg.OriginalSender,
"tx_hash": msg.TxHash,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"nonce": msg.Nonce,
"batch_index": msg.BatchIndex,
"msg_proof": msg.MsgProof,
"msg_data": msg.MsgData,
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"nonce": msg.Nonce,
"batch_index": msg.BatchIndex,
"msg_proof": msg.MsgProof,
"msg_data": msg.MsgData,
}
var exists bool
err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM l2_sent_msg WHERE (msg_hash = $1 OR nonce = $2) AND NOT is_deleted)`, msg.MsgHash, msg.Nonce).Scan(&exists)
if err != nil {
return err
}
if exists {
return fmt.Errorf("BatchInsertL2SentMsgDBTx: l2 sent msg_hash %v already exists at height %v", msg.MsgHash, msg.Height)
}
}
_, err = dbTx.NamedExec(`insert into l2_sent_msg(original_sender, tx_hash, sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:original_sender, :tx_hash, :sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
_, err = dbTx.NamedExec(`insert into l2_sent_msg(sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "msg_Hash", "err", err)
return err
}
return err
}

func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE deleted_at IS NULL ORDER BY nonce DESC LIMIT 1;`)
row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE NOT is_deleted ORDER BY nonce DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -90,15 +95,15 @@ func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
return 0, nil
}

func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batchIndex uint64) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND deleted_at IS NULL;"), proof, batchIndex, msgHash); err != nil {
func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND NOT is_deleted;"), proof, batch_index, msgHash); err != nil {
return err
}
return nil
}

func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE batch_index != 0 AND deleted_at IS NULL ORDER BY batch_index DESC LIMIT 1;`)
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE msg_proof != '' AND NOT is_deleted ORDER BY batch_index DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -114,15 +119,10 @@ func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {

func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND deleted_at IS NULL ORDER BY nonce ASC;`, startHeight, endHeight)
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND NOT is_deleted ORDER BY nonce ASC;`, startHeight, endHeight)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
@@ -135,7 +135,7 @@ func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight u

func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND deleted_at IS NULL;`, nonce)
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND NOT is_deleted;`, nonce)
err := row.StructScan(result)
if err != nil {
return nil, err
@@ -145,7 +145,7 @@ func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)

func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND deleted_at IS NULL order by nonce desc limit 1`, endBlockNumber)
row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND NOT is_deleted order by nonce desc limit 1`, endBlockNumber)
err := row.StructScan(result)
if err != nil {
return nil, err
@@ -154,36 +154,6 @@ func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2Sen
}

func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET deleted_at = current_timestamp WHERE height > $1;`, height)
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET is_deleted = true WHERE height > $1;`, height)
return err
}

func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressWithOffset(address string, offset int64, limit int64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset)
if err != nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
return results, err
}

func (l *l2SentMsgOrm) GetClaimableL2SentMsgByAddressTotalNum(address string) (uint64, error) {
var count uint64
row := l.db.QueryRowx(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1);`, address)
if err := row.Scan(&count); err != nil {
return 0, err
}
return count, nil
}

@@ -8,14 +8,6 @@ import (
"github.com/jmoiron/sqlx"
)

// RelayedMsg is the struct for relayed_msg table
type RelayedMsg struct {
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
}

type relayedMsgOrm struct {
db *sqlx.DB
}
@@ -41,15 +33,15 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel
}
_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps)
if err != nil {
log.Error("BatchInsertRelayedMsgDBTx: failed to insert relayed msgs", "err", err)
log.Error("BatchInsertRelayedMsgDBTx: failed to insert l1 cross msgs", "msg_Hashe", "err", err)
return err
}
return nil
}

func (l *relayedMsgOrm) GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error) {
func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error) {
result := &RelayedMsg{}
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND deleted_at IS NULL;`, msgHash)
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msg_hash)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -60,7 +52,7 @@ func (l *relayedMsgOrm) GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error)
}

func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -75,7 +67,7 @@ func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
}

func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND deleted_at IS NULL ORDER BY height DESC LIMIT 1;`)
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
@@ -90,11 +82,11 @@ func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
}

func (l *relayedMsgOrm) DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer1_hash != '';`, height)
_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer1_hash != '';`, height)
return err
}

func (l *relayedMsgOrm) DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE relayed_msg SET deleted_at = current_timestamp WHERE height > $1 AND layer2_hash != '';`, height)
_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer2_hash != '';`, height)
return err
}

@@ -4,7 +4,6 @@ import (
"database/sql"
"errors"

"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint

@@ -69,7 +68,7 @@ func (o *ormFactory) Beginx() (*sqlx.Tx, error) {

func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, error) {
var count uint64
row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND deleted_at IS NULL;`, sender)
row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND NOT is_deleted;`, sender)
if err := row.Scan(&count); err != nil {
return 0, err
}
@@ -79,15 +78,10 @@ func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, erro
func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
para := sender
var results []*orm.CrossMsg
rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND deleted_at IS NULL ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND NOT is_deleted ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
if err != nil || rows == nil {
return nil, err
}
defer func() {
if err = rows.Close(); err != nil {
log.Error("failed to close rows", "err", err)
}
}()
for rows.Next() {
msg := &orm.CrossMsg{}
if err = rows.StructScan(msg); err != nil {
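
For context, a hedged sketch of how a caller might page through a sender's cross messages with the count/offset pair above; the loop itself is illustrative and not part of the diff (`ormFactory` and `orm.CrossMsg` come from the surrounding package):

```go
// fetchAllCrossMsgs pages through every cross message for a sender using
// GetTotalCrossMsgCountByAddress and GetCrossMsgsByAddressWithOffset.
func fetchAllCrossMsgs(o *ormFactory, sender string) ([]*orm.CrossMsg, error) {
	total, err := o.GetTotalCrossMsgCountByAddress(sender)
	if err != nil {
		return nil, err
	}
	const pageSize = int64(100)
	var all []*orm.CrossMsg
	for offset := int64(0); offset < int64(total); offset += pageSize {
		page, err := o.GetCrossMsgsByAddressWithOffset(sender, offset, pageSize)
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
	}
	return all, nil
}
```
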
@@ -7,17 +7,17 @@ require (
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
github.com/jmoiron/sqlx v1.3.5
github.com/kataras/iris/v12 v12.2.0
github.com/lib/pq v1.10.9
github.com/lib/pq v1.10.7
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19
github.com/mattn/go-isatty v0.0.18
github.com/modern-go/reflect2 v1.0.2
github.com/pressly/goose/v3 v3.7.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
)

require (
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect
github.com/CloudyKit/jet/v6 v6.2.0 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
@@ -44,7 +44,7 @@ require (
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v23.0.6+incompatible // indirect
github.com/docker/docker v20.10.21+incompatible // indirect
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
@@ -54,7 +54,7 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gobwas/httphead v0.1.0 // indirect
github.com/gobwas/pool v0.2.1 // indirect
@@ -91,7 +91,7 @@ require (
github.com/mailgun/raymond/v2 v2.0.48 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mediocregopher/radix/v3 v3.8.1 // indirect
github.com/microcosm-cc/bluemonday v1.0.23 // indirect
@@ -134,16 +134,16 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yosssi/ace v0.0.5 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/crypto v0.10.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.11.0 // indirect
golang.org/x/tools v0.8.0 // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/protobuf v1.31.0 // indirect
google.golang.org/protobuf v1.29.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
@@ -1,8 +1,8 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
@@ -97,8 +97,8 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
@@ -144,8 +144,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -303,8 +303,8 @@ github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awS
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
@@ -326,15 +326,15 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -456,14 +456,18 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -484,8 +488,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -533,8 +537,8 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
@@ -547,7 +551,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -570,8 +574,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -581,8 +585,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -624,8 +628,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -634,8 +638,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -656,8 +660,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -688,8 +692,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1,13 +1,11 @@
package model

// QueryByAddressRequest is the request parameter of the address API
type QueryByAddressRequest struct {
Address string `url:"address"`
Offset int `url:"offset"`
Limit int `url:"limit"`
}

// QueryByHashRequest is the request parameter of the hash API
type QueryByHashRequest struct {
Txs []string `url:"txs"`
}

@@ -2,19 +2,16 @@ package model

import "bridge-history-api/service"

// Data is the return struct of the APIs
type Data struct {
Result []*service.TxHistoryInfo `json:"result"`
Total uint64 `json:"total"`
}

// QueryByAddressResponse is the schema of the address API response
type QueryByAddressResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
}

// QueryByHashResponse is the schema of the hash API response
type QueryByHashResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
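
A minimal sketch of how the response models above are consumed on the client side; the function is illustrative (only `encoding/json` and the types from this diff are assumed):

```go
package model

import "encoding/json"

// decodeAddressResponse unmarshals an address-API body into the models above.
// resp.Data.Result carries the paged tx history, and resp.Data.Total the
// overall count a client needs for pagination.
func decodeAddressResponse(body []byte) (*QueryByAddressResponse, error) {
	resp := &QueryByAddressResponse{}
	if err := json.Unmarshal(body, resp); err != nil {
		return nil, err
	}
	return resp, nil
}
```
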
@@ -11,7 +11,6 @@ import (
"bridge-history-api/db/orm"
)

// Finalized is the schema of tx finalized infos
type Finalized struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
@@ -21,7 +20,6 @@ type Finalized struct {
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
}

// UserClaimInfo is the schema of tx claim infos
type UserClaimInfo struct {
From string `json:"from"`
To string `json:"to"`
@@ -33,7 +31,6 @@ type UserClaimInfo struct {
BatchIndex string `json:"batch_index"`
}

// TxHistoryInfo is the schema of tx history infos
type TxHistoryInfo struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
@@ -50,7 +47,6 @@ type TxHistoryInfo struct {
type HistoryService interface {
GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
}

// NewHistoryService returns a service backed by a "db"
@@ -64,7 +60,6 @@ type historyBackend struct {
db db.OrmFactory
}

// GetCrossTxClaimInfo gets the UserClaimInfo for a given message hash
func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
l2sentMsg, err := db.GetL2SentMsgByHash(msgHash)
if err != nil {
@@ -82,7 +77,7 @@ func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
Value: l2sentMsg.Value,
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
Message: l2sentMsg.MsgData,
Proof: "0x" + l2sentMsg.MsgProof,
Proof: l2sentMsg.MsgProof,
BatchHash: batch.BatchHash,
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
}
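
One side of the hunk above prepends "0x" to the stored proof while the other returns `MsgProof` raw, so correctness hinges on whether the proof is stored with the prefix. A defensive, hypothetical helper that normalizes either storage format:

```go
import "strings"

// ensureHexPrefix is a hypothetical helper, not part of the diff: it returns
// the proof with exactly one "0x" prefix regardless of how MsgProof is stored.
func ensureHexPrefix(proof string) string {
	if strings.HasPrefix(proof, "0x") {
		return proof
	}
	return "0x" + proof
}
```
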
@@ -111,50 +106,6 @@ func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory)

}

// GetClaimableTxsByAddress gets all claimable txs under the given address
func (h *historyBackend) GetClaimableTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
total, err := h.db.GetClaimableL2SentMsgByAddressTotalNum(address.Hex())
if err != nil || total == 0 {
return txHistories, 0, err
}
results, err := h.db.GetClaimableL2SentMsgByAddressWithOffset(address.Hex(), offset, limit)
if err != nil || len(results) == 0 {
return txHistories, 0, err
}
var msgHashList []string
for _, result := range results {
msgHashList = append(msgHashList, result.MsgHash)
}
crossMsgs, err := h.db.GetL2CrossMsgByMsgHashList(msgHashList)
// crossMsgs can be empty, because messages can also be emitted by users calling the contract directly
if err != nil {
return txHistories, 0, err
}
crossMsgMap := make(map[string]*orm.CrossMsg)
for _, crossMsg := range crossMsgs {
crossMsgMap[crossMsg.MsgHash] = crossMsg
}
for _, result := range results {
txInfo := &TxHistoryInfo{
Hash: result.TxHash,
IsL1: false,
BlockNumber: result.Height,
FinalizeTx: &Finalized{},
ClaimInfo: GetCrossTxClaimInfo(result.MsgHash, h.db),
}
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
txInfo.Amount = crossMsg.Amount
txInfo.To = crossMsg.Target
txInfo.BlockTimestamp = crossMsg.Timestamp
txInfo.CreatedAt = crossMsg.CreatedAt
}
txHistories = append(txHistories, txInfo)
}
return txHistories, total, err
}

// GetTxsByAddress gets all txs under the given address
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
var txHistories []*TxHistoryInfo
total, err := h.db.GetTotalCrossMsgCountByAddress(address.String())
@@ -186,7 +137,6 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
return txHistories, total, nil
}

// GetTxsByHashes gets tx infos under the given tx hashes
func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {
txHistories := make([]*TxHistoryInfo, 0)
for _, hash := range hashes {
@@ -2,9 +2,8 @@ package utils

import (
"context"
"math/big"
"strings"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
@@ -14,7 +13,11 @@ import (
"bridge-history-api/db/orm"
)

// CachedParsedTxCalldata stores parsed batch infos
type MsgHashWrapper struct {
MsgHash common.Hash
TxHash common.Hash
}

type CachedParsedTxCalldata struct {
CallDataIndex uint64
BatchIndices []uint64
@@ -22,14 +25,13 @@ type CachedParsedTxCalldata struct {
EndBlocks []uint64
}

// ParseBackendL1EventLogs parses L1 watched events
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, error) {
// We need to use the contract ABI to parse the event logs
// This can only be tested after our contracts are set up

var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHash string
var msgHashes []MsgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
@@ -37,7 +39,7 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, relayedMsgs, err
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
@@ -46,14 +48,13 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(),
MsgHash: msgHash,
})
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, relayedMsgs, err
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
@@ -64,14 +65,13 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgHash: msgHash,
})
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, relayedMsgs, err
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
@@ -81,15 +81,14 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
MsgHash: msgHash,
TokenID: event.TokenID.Uint64(),
})
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, relayedMsgs, err
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
@@ -99,62 +98,26 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
TokenID: event.TokenID.Uint64(),
Amount: event.Amount.String(),
MsgHash: msgHash,
})
case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, relayedMsgs, err
return l1CrossMsg, msgHashes, relayedMsgs, err
}
// every deposit event is emitted after a SentMessage event, so this msg_hash can be used as the next deposit event's msg_hash
msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex()
case backendabi.L1BatchDepositERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "BatchDepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgHash: msgHash,
})
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
return l1CrossMsg, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgHash: msgHash,
})
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, MsgHashWrapper{
MsgHash: msgHash,
TxHash: vlog.TxHash})
case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{}
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, relayedMsgs, err
return l1CrossMsg, msgHashes, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
@@ -165,18 +128,18 @@ func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
}

}
return l1CrossMsg, relayedMsgs, nil
return l1CrossMsg, msgHashes, relayedMsgs, nil
}
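
The new `MsgHashWrapper` return value pairs each computed message hash with the transaction that emitted it. A hedged sketch of how a caller might index that slice (the lookup map is illustrative, not part of the diff):

```go
import "github.com/ethereum/go-ethereum/common"

// indexMsgHashes builds a msg-hash -> tx-hash lookup from the wrappers
// returned by ParseBackendL1EventLogs above.
func indexMsgHashes(wrappers []MsgHashWrapper) map[common.Hash]common.Hash {
	byMsgHash := make(map[common.Hash]common.Hash, len(wrappers))
	for _, w := range wrappers {
		byMsgHash[w.MsgHash] = w.TxHash
	}
	return byMsgHash
}
```
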
// ParseBackendL2EventLogs parses L2 watched events
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
// We need to use the contract ABI to parse the event logs
// This can only be tested after our contracts are set up

var l2CrossMsg []*orm.CrossMsg
// used to confirm finalized L1 messages
var relayedMsgs []*orm.RelayedMsg
var l2SentMsgs []*orm.L2SentMsg
var l2SentMsg []*orm.L2SentMsg
var msgHashes []MsgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
@@ -184,9 +147,8 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
@@ -194,16 +156,14 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
@@ -213,16 +173,14 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
@@ -231,17 +189,15 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
TokenID: event.TokenID.Uint64(),
})
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
@@ -250,75 +206,35 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: event.TokenID.String(),
TokenID: event.TokenID.Uint64(),
Amount: event.Amount.String(),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2BatchWithdrawERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
}
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
})
case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
}
// every withdraw event is emitted after a SentMessage event, so this msg_hash can be used as the next withdraw event's msg_hash
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
l2SentMsgs = append(l2SentMsgs,
&orm.L2SentMsg{
Sender: event.Sender.Hex(),
TxHash: vlog.TxHash.Hex(),
Target: event.Target.Hex(),
Value: event.Value.String(),
MsgHash: msgHash.Hex(),
Height: vlog.BlockNumber,
Nonce: event.MessageNonce.Uint64(),
MsgData: hexutil.Encode(event.Message),
})
msgHashes = append(msgHashes, MsgHashWrapper{
MsgHash: msgHash,
TxHash: vlog.TxHash})
l2SentMsg = append(l2SentMsg, &orm.L2SentMsg{
Sender: event.Sender.Hex(),
Target: event.Target.Hex(),
Value: event.Value.String(),
MsgHash: msgHash.Hex(),
Height: vlog.BlockNumber,
Nonce: event.MessageNonce.Uint64(),
MsgData: hexutil.Encode(event.Message),
})
case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{}
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
@@ -328,10 +244,9 @@ func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedM

}
}
return l2CrossMsg, relayedMsgs, l2SentMsgs, nil
return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, nil
}
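
Both parsers derive the hash from the SentMessage fields via `ComputeMessageHash`, which is defined elsewhere in this package. Based only on the call sites above, it takes (sender, target, value, nonce, message) and returns a `common.Hash`; a usage sketch with placeholder values:

```go
import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// exampleMessageHash shows the call shape used by both parsers; all values
// here are placeholders, and ComputeMessageHash itself lives elsewhere in
// this package.
func exampleMessageHash() common.Hash {
	sender := common.HexToAddress("0x0000000000000000000000000000000000000001")
	target := common.HexToAddress("0x0000000000000000000000000000000000000002")
	return ComputeMessageHash(sender, target, big.NewInt(0), big.NewInt(0), []byte{})
}
```
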
// ParseBatchInfoFromScrollChain parses ScrollChain events
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
var rollupBatches []*orm.RollupBatch
cache := make(map[string]CachedParsedTxCalldata)
@@ -388,13 +303,3 @@ func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client
}
return rollupBatches, nil
}

func convertBigIntArrayToString(array []*big.Int) string {
stringArray := make([]string, len(array))
for i, num := range array {
stringArray[i] = num.String()
}

result := strings.Join(stringArray, ", ")
return result
}
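
`convertBigIntArrayToString` is what fills the `TokenIDs`/`TokenAmounts` fields in the batch events above; a quick usage sketch:

```go
import (
	"fmt"
	"math/big"
)

// exampleConvert joins token IDs with ", ", matching the format stored in
// the TokenIDs and TokenAmounts columns.
func exampleConvert() {
	ids := []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
	fmt.Println(convertBigIntArrayToString(ids)) // prints "1, 2, 3"
}
```
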
@@ -22,7 +22,6 @@ func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
}

// GetSafeBlockNumber gets the safe block number, which is the current block number minus the confirmations
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
number, err := client.BlockNumber(ctx)
if err != nil || number <= confirmations {
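
The hunk cuts off mid-function, but the doc comment fixes the contract: the safe block is the latest height minus the confirmation depth. A hedged sketch of that logic; returning 0 for short chains is an assumption matching the visible `number <= confirmations` guard:

```go
import (
	"context"

	"github.com/ethereum/go-ethereum/ethclient"
)

// safeBlockNumberSketch illustrates GetSafeBlockNumber's contract: latest
// height minus confirmations, with 0 while the chain is still shorter than
// the required depth (an assumption; the real body is truncated above).
func safeBlockNumberSketch(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
	number, err := client.BlockNumber(ctx)
	if err != nil {
		return 0, err
	}
	if number <= confirmations {
		return 0, nil
	}
	return number - confirmations, nil
}
```
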
@@ -1,12 +1,12 @@
.PHONY: lint docker clean bridge

IMAGE_NAME=bridge
IMAGE_VERSION=latest
IMAGE_VERSION=v9.9
REPO_ROOT_DIR=./..

mock_abi:
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./bridge/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./bridge/mock_bridge/MockBridgeL2.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go

bridge_bins: ## Builds the Bridge bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
File diff suppressed because one or more lines are too long
@@ -16,9 +16,9 @@ func TestEventSignature(t *testing.T) {
assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))

assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("9d3058a3cb9739a2527f22dd9a4138065844037d3004254952e2458d808cc364"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("6be443154c959a7a1645b4392b6fa97d8e8ab6e8fd853d7085e8867083737d79"))

assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e"))
assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("bdcc7517f8fe3db6506dfd910942d0bbecaf3d6a506dadea65b0d988e75b9439"))

assert.Equal(L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
@@ -35,10 +35,10 @@ func TestPackRelayL2MessageWithProof(t *testing.T) {
assert.NoError(err)

proof := IL1ScrollMessengerL2MessageProof{
BatchIndex: big.NewInt(0),
MerkleProof: []byte{},
BatchHash: common.Hash{},
MerkleProof: make([]byte, 0),
}
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), []byte{}, proof)
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
assert.NoError(err)
}

@@ -48,12 +48,27 @@ func TestPackCommitBatch(t *testing.T) {
scrollChainABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(err)

version := uint8(1)
var parentBatchHeader []byte
var chunks [][]byte
var skippedL1MessageBitmap []byte
header := IScrollChainBlockContext{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BlockNumber: 0,
Timestamp: 0,
BaseFee: big.NewInt(0),
GasLimit: 0,
NumTransactions: 0,
NumL1Messages: 0,
}

_, err = scrollChainABI.Pack("commitBatch", version, parentBatchHeader, chunks, skippedL1MessageBitmap)
batch := IScrollChainBatch{
Blocks: []IScrollChainBlockContext{header},
PrevStateRoot: common.Hash{},
NewStateRoot: common.Hash{},
WithdrawTrieRoot: common.Hash{},
BatchIndex: 0,
L2Transactions: make([]byte, 0),
}

_, err = scrollChainABI.Pack("commitBatch", batch)
assert.NoError(err)
}

@@ -63,26 +78,14 @@ func TestPackFinalizeBatchWithProof(t *testing.T) {
l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(err)

batchHeader := []byte{}
prevStateRoot := common.Hash{}
postStateRoot := common.Hash{}
withdrawRoot := common.Hash{}
aggrProof := []byte{}
proof := make([]*big.Int, 10)
instance := make([]*big.Int, 10)
for i := 0; i < 10; i++ {
proof[i] = big.NewInt(0)
instance[i] = big.NewInt(0)
}

_, err = l1RollupABI.Pack("finalizeBatchWithProof", batchHeader, prevStateRoot, postStateRoot, withdrawRoot, aggrProof)
assert.NoError(err)
}

func TestPackImportGenesisBatch(t *testing.T) {
assert := assert.New(t)

l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(err)

batchHeader := []byte{}
stateRoot := common.Hash{}

_, err = l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
_, err = l1RollupABI.Pack("finalizeBatchWithProof", common.Hash{}, proof, instance)
assert.NoError(err)
}

@@ -92,7 +95,7 @@ func TestPackRelayL1Message(t *testing.T) {
l2MessengerABI, err := L2ScrollMessengerMetaData.GetAbi()
assert.NoError(err)

_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), []byte{})
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0))
assert.NoError(err)
}

@@ -123,6 +126,6 @@ func TestPackImportBlock(t *testing.T) {

l1BlockContainerABI := L1BlockContainerABI

_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, []byte{}, false)
_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), false)
assert.NoError(err)
}
@@ -11,13 +11,13 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/watcher"
"scroll-tech/bridge/internal/utils"
)

var app *cli.App
@@ -29,18 +29,18 @@ func init() {
app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `event-watcher-test` app for integration-test.
utils.RegisterSimulation(app, utils.EventWatcherApp)
cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}

func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +48,13 @@ func action(ctx *cli.Context) error {

subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -75,14 +75,14 @@ func action(ctx *cli.Context) error {
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)

go utils.Loop(subCtx, 10*time.Second, func() {
go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})

// Start l2 watcher process
go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finished starting all L2 functions
log.Info("Start event-watcher successfully")

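
The watcher binaries drive their polling with the shared `Loop`/`LoopWithContext` helpers from scroll-tech/common/utils: run a closure on a fixed ticker until the context is cancelled. An illustrative stand-in for that pattern (not the real helper's code):

```go
import (
	"context"
	"time"
)

// loopSketch mimics the cutils.Loop usage above: invoke fn every interval
// until ctx is cancelled. Illustrative stand-in, not the real helper.
func loopSketch(ctx context.Context, interval time.Duration, fn func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			fn()
		}
	}
}
```
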
@@ -11,15 +11,14 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/internal/utils"
)

var app *cli.App
@@ -32,31 +31,31 @@ func init() {
app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle."
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `gas-oracle-test` app for integration-test.
utils.RegisterSimulation(app, utils.GasOracleApp)
cutils.RegisterSimulation(app, cutils.GasOracleApp)
}

func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -77,22 +76,21 @@ func action(ctx *cli.Context) error {
return err
}

l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
@@ -104,8 +102,8 @@ func action(ctx *cli.Context) error {
})

// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)

// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")

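The only real change in the import blocks across these apps is which package owns the bare utils identifier: the newer side imports scroll-tech/common/utils unaliased and prefixes the bridge-internal helpers as butils, while the older side aliases the common package as cutils so that scroll-tech/bridge/internal/utils (now home to InitDB, CloseDB and GetLatestConfirmedBlockNumber) keeps the plain name. A compilable sketch of the older branch's convention, assuming both packages as shown in this diff (it only compiles inside the bridge module, and the blank identifiers merely silence unused-import errors):

package main

import (
	cutils "scroll-tech/common/utils"

	"scroll-tech/bridge/internal/utils"
)

var (
	_ = cutils.CommonFlags // shared CLI flags, LogSetup, Loop helpers
	_ = utils.InitDB       // bridge-internal DB and chain helpers
)

func main() {}
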
@@ -10,13 +10,13 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/utils"
)

var app *cli.App
@@ -29,18 +29,18 @@ func init() {
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.MessageRelayerApp)
cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}

func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -48,13 +48,13 @@ func action(ctx *cli.Context) error {

subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -62,6 +62,8 @@ func action(ctx *cli.Context) error {
// Start metrics server.
metrics.Serve(subCtx, ctx)

// Init l2geth connection

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
@@ -69,7 +71,7 @@ func action(ctx *cli.Context) error {
}

// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)

// Finish start all message relayer functions
log.Info("Start message-relayer successfully")

@@ -11,15 +11,14 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
cutils "scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
butils "scroll-tech/bridge/internal/utils"
"scroll-tech/bridge/internal/utils"
)

var app *cli.App
@@ -31,19 +30,18 @@ func init() {
app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `rollup-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.RollupRelayerApp)
cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}

func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -51,13 +49,13 @@ func action(ctx *cli.Context) error {

subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
db, err := utils.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
if err = utils.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -72,45 +70,37 @@ func action(ctx *cli.Context) error {
return err
}

initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}

chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db)
if err != nil {
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
return err
}

batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, db)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}

l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)

// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(number)
l2watcher.TryFetchRunningMissingBlocks(ctx, number)
})

go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
// Batch proposer loop
go cutils.Loop(subCtx, 2*time.Second, func() {
batchProposer.TryProposeBatch()
batchProposer.TryCommitBatches()
})

go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)

go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)

go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)

// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")

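On the newer side the rollup-relayer reads utils.ImportGenesisFlag (appended via utils.RollupRelayerFlags above) and threads the boolean into NewLayer2Relayer as initGenesis; the older side drops both the flag and the parameter, and the genesis-import body it controlled appears at the end of this compare. The flag's definition is not part of this diff; the sketch below shows how such a boolean flag is typically declared and read with urfave/cli/v2, with a hypothetical flag name standing in for the real one:

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// importGenesisFlag is a stand-in for utils.ImportGenesisFlag; the real
// name and usage string live in scroll-tech/common/utils and may differ.
var importGenesisFlag = &cli.BoolFlag{
	Name:  "import-genesis",
	Usage: "commit the L2 genesis batch to L1 before starting",
}

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{importGenesisFlag}
	app.Action = func(ctx *cli.Context) error {
		initGenesis := ctx.Bool(importGenesisFlag.Name)
		fmt.Println("initGenesis =", initGenesis)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
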
@@ -72,20 +72,18 @@
"1414141414141414141414141414141414141414141414141414141414141414"
]
},
"chunk_proposer_config": {
"max_tx_gas_per_chunk": 1123456,
"max_l2_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300
},
"batch_proposer_config": {
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300
"proof_generation_freq": 1,
"batch_gas_threshold": 3000000,
"batch_tx_num_threshold": 44,
"batch_time_sec": 300,
"batch_commit_time_sec": 1200,
"batch_blocks_limit": 100,
"commit_tx_calldata_size_limit": 200000,
"public_input_config": {
"max_tx_num": 44,
"padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
}
},
"db_config": {

@@ -1,17 +1,22 @@
module scroll-tech/bridge

go 1.19
go 1.18

require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.3.0
gorm.io/gorm v1.25.2
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/sync v0.1.0
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.1
modernc.org/mathutil v1.4.1
)

require (
@@ -21,6 +26,7 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
@@ -29,16 +35,20 @@ require (
github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
@@ -52,10 +62,11 @@ require (
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
golang.org/x/tools v0.8.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

116 bridge/go.sum
@@ -18,6 +18,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
@@ -28,6 +29,9 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -51,6 +55,13 @@ github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixH
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -58,11 +69,16 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -70,15 +86,20 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -92,20 +113,25 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56 h1:Cqj7haxwvzI2O4n9ZZ25helShzFGCy7Z/B+FFSBFHNI=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61 h1:4fslVpNOPLeJPYX3tivrVWgqNvChPs7/y9OqWvQSNCw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61/go.mod h1:45PZqlQCqV0dU4o4+SE8LoJLEvXkK5j45ligvbih9QY=
github.com/scroll-tech/zktrie v0.5.3 h1:jjzQchGU6XPL5s1C5bwwivSadefSRuYASE9OL7UKAdE=
github.com/scroll-tech/zktrie v0.5.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -118,8 +144,16 @@ github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
@@ -127,48 +161,94 @@ github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYm
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=

@@ -4,15 +4,13 @@ import (
"encoding/json"
"os"
"path/filepath"

"scroll-tech/common/database"
)

// Config load configuration items.
type Config struct {
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *database.Config `json:"db_config"`
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *DBConfig `json:"db_config"`
}

// NewConfig returns a new instance of Config.

33 bridge/internal/config/db.go Normal file
@@ -0,0 +1,33 @@
package config

import (
"encoding/json"
"os"
"path/filepath"
)

// DBConfig db config
type DBConfig struct {
// data source name
DSN string `json:"dsn"`
DriverName string `json:"driver_name"`

MaxOpenNum int `json:"maxOpenNum"`
MaxIdleNum int `json:"maxIdleNum"`
}

// NewDBConfig returns a new instance of Config.
func NewDBConfig(file string) (*DBConfig, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}

cfg := &DBConfig{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}

return cfg, nil
}

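This new file moves the DB settings out of scroll-tech/common/database and into the bridge module itself, matching the Config struct change just above. A short usage sketch (the file path and JSON values are illustrative, and since the package is internal this only compiles inside the bridge module):

package main

import (
	"log"

	"scroll-tech/bridge/internal/config"
)

func main() {
	// Any JSON file with the four fields above works, e.g.
	// {"dsn": "postgres://...", "driver_name": "postgres", "maxOpenNum": 50, "maxIdleNum": 20}
	dbCfg, err := config.NewDBConfig("conf/db_config.json")
	if err != nil {
		log.Fatalf("failed to load db config: %v", err)
	}
	log.Printf("driver=%s maxOpen=%d maxIdle=%d", dbCfg.DriverName, dbCfg.MaxOpenNum, dbCfg.MaxIdleNum)
}
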
@@ -4,6 +4,8 @@ import (
"github.com/scroll-tech/go-ethereum/rpc"

"github.com/scroll-tech/go-ethereum/common"

"scroll-tech/bridge/internal/types"
)

// L2Config loads l2geth configuration items.
@@ -20,27 +22,28 @@ type L2Config struct {
WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
// The chunk_proposer config
ChunkProposerConfig *ChunkProposerConfig `json:"chunk_proposer_config"`
// The batch_proposer config
BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
}

// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
}

// BatchProposerConfig loads batch_proposer configuration items.
// BatchProposerConfig loads l2watcher batch_proposer configuration items.
type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
// Proof generation frequency, generating proof every k blocks
ProofGenerationFreq uint64 `json:"proof_generation_freq"`
// Txnum threshold in a batch
BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"`
// Gas threshold in a batch
BatchGasThreshold uint64 `json:"batch_gas_threshold"`
// Time waited to generate a batch even if gas_threshold not met
BatchTimeSec uint64 `json:"batch_time_sec"`
// Time waited to commit batches before the calldata met CommitTxCalldataSizeLimit
BatchCommitTimeSec uint64 `json:"batch_commit_time_sec"`
// Max number of blocks in a batch
BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
// Commit tx calldata size limit in bytes, target to cap the gas use of commit tx at 2M gas
CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
// Commit tx calldata min size limit in bytes
CommitTxCalldataMinSize uint64 `json:"commit_tx_calldata_min_size,omitempty"`
// The public input hash config
PublicInputConfig *types.PublicInputHashConfig `json:"public_input_config"`
}

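The struct tags above line up one-to-one with the batch_proposer_config fragment in the sample config earlier in this compare. Decoding that fragment yields a value like the following sketch (values copied from the sample; PublicInputConfig is omitted because its field names are not shown anywhere in this diff):

package config

// sampleBatchProposerConfig mirrors the older branch's sample JSON; it is an
// illustration, not canonical production configuration.
var sampleBatchProposerConfig = &BatchProposerConfig{
	ProofGenerationFreq:       1,
	BatchGasThreshold:         3000000,
	BatchTxNumThreshold:       44,
	BatchTimeSec:              300,
	BatchCommitTimeSec:        1200,
	BatchBlocksLimit:          100,
	CommitTxCalldataSizeLimit: 200000,
}
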
@@ -52,7 +52,7 @@ type Layer1Relayer struct {
gasPriceDiff uint64

l1MessageOrm *orm.L1Message
l1BlockOrm *orm.L1Block
l1Block *orm.L1Block
}

// NewLayer1Relayer will return a new instance of Layer1RelayerClient
@@ -90,7 +90,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
l1Relayer := &Layer1Relayer{
ctx: ctx,
l1MessageOrm: orm.NewL1Message(db),
l1BlockOrm: orm.NewL1Block(db),
l1Block: orm.NewL1Block(db),

messageSender: messageSender,
l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
@@ -159,13 +159,13 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {

// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
latestBlockHeight, err := r.l1Block.GetLatestL1BlockHeight()
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
return
}

blocks, err := r.l1BlockOrm.GetL1Blocks(r.ctx, map[string]interface{}{
blocks, err := r.l1Block.GetL1Blocks(map[string]interface{}{
"number": latestBlockHeight,
})
if err != nil {
@@ -197,7 +197,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}

err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
err = r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
@@ -232,14 +232,14 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case cfm := <-r.gasOracleSender.ConfirmChan():
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
err := r.l1Block.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}

@@ -12,14 +12,13 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"

"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/common/utils"

"scroll-tech/database/migrate"

"scroll-tech/bridge/internal/controller/sender"
"scroll-tech/bridge/internal/orm"
"scroll-tech/bridge/internal/orm/migrate"
bridgeUtils "scroll-tech/bridge/internal/utils"
)

var (
@@ -50,7 +49,7 @@ var (
)

func setupL1RelayerDB(t *testing.T) *gorm.DB {
db, err := database.InitDB(cfg.DBConfig)
db, err := bridgeUtils.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -61,7 +60,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
// testCreateNewRelayer test create new relayer instance and stop
func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
defer bridgeUtils.CloseDB(db)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
@@ -69,7 +68,7 @@ func testCreateNewL1Relayer(t *testing.T) {

func testL1RelayerProcessSaveEvents(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
defer bridgeUtils.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
@@ -87,7 +86,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {

func testL1RelayerMsgConfirm(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
defer bridgeUtils.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Messages := []*orm.L1Message{
{MsgHash: "msg-1", QueueIndex: 0},
@@ -124,12 +123,12 @@ func testL1RelayerMsgConfirm(t *testing.T) {

func testL1RelayerGasOracleConfirm(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
defer bridgeUtils.CloseDB(db)
l1BlockOrm := orm.NewL1Block(db)

l1Block := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0, GasOracleStatus: int16(types.GasOraclePending)},
{Hash: "gas-oracle-2", Number: 1, GasOracleStatus: int16(types.GasOraclePending)},
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
}
// Insert test data.
assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
@@ -153,8 +152,8 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {

// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"})
msg1, err1 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := l1BlockOrm.GetL1Blocks(map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
})
@@ -163,7 +162,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {

func testL1RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
defer bridgeUtils.CloseDB(db)

l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
@@ -175,28 +174,28 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
var l1BlockOrm *orm.L1Block
convey.Convey("GetLatestL1BlockHeight failure", t, func() {
targetErr := errors.New("GetLatestL1BlockHeight error")
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
return 0, targetErr
})
defer patchGuard.Reset()
l1Relayer.ProcessGasPriceOracle()
})

patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func() (uint64, error) {
return 100, nil
})
defer patchGuard.Reset()

convey.Convey("GetL1Blocks failure", t, func() {
targetErr := errors.New("GetL1Blocks error")
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
return nil, targetErr
})
l1Relayer.ProcessGasPriceOracle()
})

convey.Convey("Block not exist", t, func() {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{
{Hash: "gas-oracle-1", Number: 0},
{Hash: "gas-oracle-2", Number: 1},
@@ -206,12 +205,12 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Relayer.ProcessGasPriceOracle()
})

patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(fields map[string]interface{}) ([]orm.L1Block, error) {
tmpInfo := []orm.L1Block{
{
Hash: "gas-oracle-1",
Number: 0,
GasOracleStatus: int16(types.GasOraclePending),
GasOracleStatus: int(types.GasOraclePending),
},
}
return tmpInfo, nil

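Every change in this test file tracks an ORM signature: gomonkey stubs must match the patched method exactly, so dropping the ctx parameter from GetLatestL1BlockHeight and GetL1Blocks forces the same edit in each ApplyMethodFunc call. A minimal, self-contained example of the same patching pattern, independent of the bridge code (gomonkey rewrites function entry points at runtime, so run it with go test -gcflags=all=-l to keep the target from being inlined):

package example

import (
	"testing"

	"github.com/agiledragon/gomonkey/v2"
	"github.com/stretchr/testify/assert"
)

type blockStore struct{}

// LatestHeight stands in for an ORM method such as GetLatestL1BlockHeight.
func (s *blockStore) LatestHeight() (uint64, error) { return 0, nil }

func TestPatchedLatestHeight(t *testing.T) {
	var s *blockStore // a typed nil is enough; gomonkey only needs the type
	patches := gomonkey.ApplyMethodFunc(s, "LatestHeight", func() (uint64, error) {
		return 100, nil
	})
	defer patches.Reset()

	h, err := (&blockStore{}).LatestHeight()
	assert.NoError(t, err)
	assert.Equal(t, uint64(100), h)
}
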
@@ -3,13 +3,12 @@ package relayer
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
@@ -22,13 +21,18 @@ import (
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/controller/sender"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
bridgeTypes "scroll-tech/bridge/internal/types"
|
||||
"scroll-tech/bridge/internal/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
bridgeL2MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
|
||||
bridgeL2MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesSkippedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/skipped/total", metrics.ScrollRegistry)
|
||||
)

// Layer2Relayer is responsible for
@@ -42,10 +46,9 @@ type Layer2Relayer struct {

    l2Client *ethclient.Client

    db            *gorm.DB
    batchOrm      *orm.Batch
    chunkOrm      *orm.Chunk
    l2BlockOrm    *orm.L2Block
    blockBatchOrm *orm.BlockBatch
    blockTraceOrm *orm.BlockTrace
    l2MessageOrm  *orm.L2Message

    cfg *config.RelayerConfig

@@ -69,8 +72,8 @@ type Layer2Relayer struct {
    processingMessage sync.Map

    // A list of processing batches commitment.
    // key(string): confirmation ID, value(string): batch hash.
    processingCommitment sync.Map
    // key(string): confirmation ID, value([]string): batch hashes.
    processingBatchesCommitment sync.Map

    // A list of processing batch finalization.
    // key(string): confirmation ID, value(string): batch hash.
@@ -78,7 +81,7 @@ type Layer2Relayer struct {
}

// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
    // @todo use different sender for relayer, block commit and proof finalize
    messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
    if err != nil {
@@ -115,11 +118,10 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.

    layer2Relayer := &Layer2Relayer{
        ctx: ctx,
        db:  db,

        batchOrm:      orm.NewBatch(db),
        l2BlockOrm:    orm.NewL2Block(db),
        chunkOrm:      orm.NewChunk(db),
        blockBatchOrm: orm.NewBlockBatch(db),
        l2MessageOrm:  orm.NewL2Message(db),
        blockTraceOrm: orm.NewBlockTrace(db),

        l2Client: l2Client,

@@ -137,135 +139,20 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
        minGasPrice:  minGasPrice,
        gasPriceDiff: gasPriceDiff,

        cfg:                    cfg,
        processingMessage:      sync.Map{},
        processingCommitment:   sync.Map{},
        processingFinalization: sync.Map{},
        cfg:                         cfg,
        processingMessage:           sync.Map{},
        processingBatchesCommitment: sync.Map{},
        processingFinalization:      sync.Map{},
    }

    // Initialize genesis before we do anything else
    if initGenesis {
        if err := layer2Relayer.initializeGenesis(); err != nil {
            return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
        }
    }

    go layer2Relayer.handleConfirmLoop(ctx)
    return layer2Relayer, nil
}

func (r *Layer2Relayer) initializeGenesis() error {
    if count, err := r.batchOrm.GetBatchCount(r.ctx); err != nil {
        return fmt.Errorf("failed to get batch count: %v", err)
    } else if count > 0 {
        log.Info("genesis already imported", "batch count", count)
        return nil
    }

    genesis, err := r.l2Client.HeaderByNumber(r.ctx, big.NewInt(0))
    if err != nil {
        return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
    }

    log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())

    chunk := &types.Chunk{
        Blocks: []*types.WrappedBlock{{
            Header:           genesis,
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        }},
    }

    err = r.db.Transaction(func(dbTX *gorm.DB) error {
        var dbChunk *orm.Chunk
        dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
        if err != nil {
            return fmt.Errorf("failed to insert chunk: %v", err)
        }

        if err = r.chunkOrm.UpdateProvingStatus(r.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
            return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
        }

        var batch *orm.Batch
        batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX)
        if err != nil {
            return fmt.Errorf("failed to insert batch: %v", err)
        }

        if err = r.chunkOrm.UpdateBatchHashInRange(r.ctx, 0, 0, batch.Hash, dbTX); err != nil {
            return fmt.Errorf("failed to update batch hash for chunks: %v", err)
        }

        if err = r.batchOrm.UpdateProvingStatus(r.ctx, batch.Hash, types.ProvingTaskVerified, dbTX); err != nil {
            return fmt.Errorf("failed to update genesis batch proving status: %v", err)
        }

        if err = r.batchOrm.UpdateRollupStatus(r.ctx, batch.Hash, types.RollupFinalized, dbTX); err != nil {
            return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
        }

        // commit genesis batch on L1
        // note: we do this inside the DB transaction so that we can revert all DB changes if this step fails
        return r.commitGenesisBatch(batch.Hash, batch.BatchHeader, common.HexToHash(batch.StateRoot))
    })

    if err != nil {
        return fmt.Errorf("update genesis transaction failed: %v", err)
    }

    log.Info("successfully imported genesis chunk and batch")

    return nil
}
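The closure above relies on gorm's transaction semantics: returning a non-nil error rolls back every write issued through dbTX, so the database rows and the L1 genesis commit succeed or fail together. A stripped-down sketch of the pattern (the two helpers are hypothetical):

err := db.Transaction(func(dbTX *gorm.DB) error {
    if err := insertGenesisRows(dbTX); err != nil { // hypothetical helper
        return err // gorm rolls back all writes made via dbTX
    }
    // Submitting to L1 inside the closure means a failed submission
    // also reverts the database writes above.
    return submitGenesisToL1() // hypothetical helper
})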

func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
    // encode "importGenesisBatch" transaction calldata
    calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
    if err != nil {
        return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
    }

    // submit genesis batch to L1 rollup contract
    txHash, err := r.rollupSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
    if err != nil {
        return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
    }
    log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)

    // wait for confirmation
    // we assume that no other transactions are sent before initializeGenesis completes
    ticker := time.NewTicker(10 * time.Second)
    defer ticker.Stop()

    // create the timeout channel once, before the loop; calling time.After inside
    // the select would arm a fresh 5-minute timer on every iteration, so the
    // deadline could never fire as long as ticks keep arriving
    timeout := time.After(5 * time.Minute)

    for {
        select {
        // print progress
        case <-ticker.C:
            log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())

        // timeout
        case <-timeout:
            return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())

        // handle confirmation
        case confirmation := <-r.rollupSender.ConfirmChan():
            if confirmation.ID != batchHash {
                return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
            }
            if !confirmation.IsSuccessful {
                return fmt.Errorf("import genesis batch tx failed")
            }
            log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
            return nil
        }
    }
}
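Reduced to its essentials, the loop above is a generic wait pattern: a progress ticker, a hard deadline, and a result channel. A sketch under simplified types (the bool channel stands in for the sender's confirmation struct):

func waitForConfirmation(confirmCh <-chan bool) error {
    ticker := time.NewTicker(10 * time.Second)
    defer ticker.Stop()
    deadline := time.After(5 * time.Minute) // created once, so the deadline is absolute
    for {
        select {
        case <-ticker.C:
            // periodic progress reporting goes here
        case <-deadline:
            return fmt.Errorf("timed out waiting for confirmation")
        case ok := <-confirmCh:
            if !ok {
                return fmt.Errorf("transaction reverted")
            }
            return nil
        }
    }
}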

const processMsgLimit = 100

// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
    batch, err := r.batchOrm.GetLatestBatch(r.ctx)
    batch, err := r.blockBatchOrm.GetLatestBatch()
    if err != nil {
        log.Error("Failed to GetLatestBatch", "err", err)
        return
@@ -296,7 +183,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
        return
    }

    err = r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
    err = r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
    if err != nil {
        log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
        return
@@ -307,110 +194,93 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
    }
}

// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
    // get pending batches from database in ascending order by their index.
    pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 10)
// SendCommitTx sends commitBatches tx to L1.
func (r *Layer2Relayer) SendCommitTx(batchData []*bridgeTypes.BatchData) error {
    if len(batchData) == 0 {
        log.Error("SendCommitTx receives empty batch")
        return nil
    }

    // pack calldata
    commitBatches := make([]bridgeAbi.IScrollChainBatch, len(batchData))
    for i, batch := range batchData {
        commitBatches[i] = batch.Batch
    }
    calldata, err := r.l1RollupABI.Pack("commitBatches", commitBatches)
    if err != nil {
        log.Error("Failed to fetch pending L2 batches", "err", err)
        return
        log.Error("Failed to pack commitBatches",
            "error", err,
            "start_batch_index", commitBatches[0].BatchIndex,
            "end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex)
        return err
    }
    for _, batch := range pendingBatches {
        // get current header and parent header.
        currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
        if err != nil {
            log.Error("Failed to decode batch header", "index", batch.Index, "error", err)
            return
        }
        parentBatch := &orm.Batch{}
        if batch.Index > 0 {
            parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
            if err != nil {
                log.Error("Failed to get parent batch header", "index", batch.Index-1, "error", err)
                return
            }
        }

        // get the chunks for the batch
        startChunkIndex := batch.StartChunkIndex
        endChunkIndex := batch.EndChunkIndex
        dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, startChunkIndex, endChunkIndex)
        if err != nil {
            log.Error("Failed to fetch chunks",
                "start index", startChunkIndex,
                "end index", endChunkIndex, "error", err)
            return
        }

        encodedChunks := make([][]byte, len(dbChunks))
        for i, c := range dbChunks {
            var wrappedBlocks []*types.WrappedBlock
            wrappedBlocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
            if err != nil {
                log.Error("Failed to fetch wrapped blocks",
                    "start number", c.StartBlockNumber,
                    "end number", c.EndBlockNumber, "error", err)
                return
            }
            chunk := &types.Chunk{
                Blocks: wrappedBlocks,
            }
            var chunkBytes []byte
            chunkBytes, err = chunk.Encode(c.TotalL1MessagesPoppedBefore)
            if err != nil {
                log.Error("Failed to encode chunk", "error", err)
                return
            }
            encodedChunks[i] = chunkBytes
        }

        calldata, err := r.l1RollupABI.Pack("commitBatch", currentBatchHeader.Version(), parentBatch.BatchHeader, encodedChunks, currentBatchHeader.SkippedL1MessageBitmap())
        if err != nil {
            log.Error("Failed to pack commitBatch", "index", batch.Index, "error", err)
            return
        }

        // send transaction
        txID := batch.Hash + "-commit"
        txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
        if err != nil {
            if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
                log.Error("Failed to send commitBatch tx to layer1 ", "err", err)
            }
            return
        }

        err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batch.Hash, txHash.String(), types.RollupCommitting)
        if err != nil {
            log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
            return
        }
        bridgeL2BatchesCommittedTotalCounter.Inc(1)
        r.processingCommitment.Store(txID, batch.Hash)
        log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
    // generate a unique txID and send transaction
    var bytes []byte
    for _, batch := range batchData {
        bytes = append(bytes, batch.Hash().Bytes()...)
    }
    txID := crypto.Keccak256Hash(bytes).String()
    txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
    if err != nil {
        if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
            log.Error("Failed to send commitBatches tx to layer1 ", "err", err)
        }
        return err
    }
    bridgeL2BatchesCommittedTotalCounter.Inc(int64(len(commitBatches)))
    log.Info("Sent the commitBatches tx to layer1",
        "tx_hash", txHash.Hex(),
        "start_batch_index", commitBatches[0].BatchIndex,
        "end_batch_index", commitBatches[len(commitBatches)-1].BatchIndex)

    // record and sync with db, @todo handle db error
    batchHashes := make([]string, len(batchData))
    for i, batch := range batchData {
        batchHashes[i] = batch.Hash().Hex()
        err = r.blockBatchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHashes[i], txHash.String(), types.RollupCommitting)
        if err != nil {
            log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batchHashes[i], "index", batch.Batch.BatchIndex, "err", err)
        }
    }
    r.processingBatchesCommitment.Store(txID, batchHashes)
    return nil
}
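The txID above is deterministic: hashing the concatenated batch hashes means the same batch set always maps to the same confirmation ID, which is what lets handleConfirmation look the batches up again later. A self-contained sketch of the derivation (assuming the go-ethereum fork used in this repo):

import (
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
)

func commitTxID(batchHashes []common.Hash) string {
    var buf []byte
    for _, h := range batchHashes {
        buf = append(buf, h.Bytes()...)
    }
    // Keccak over the concatenation: stable for a given ordered batch set.
    return crypto.Keccak256Hash(buf).String()
}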

// ProcessCommittedBatches submits proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
    // retrieves the earliest batch whose rollup status is 'committed'
    fields := map[string]interface{}{
        "rollup_status": types.RollupCommitted,
    // set skipped batches in a single db operation
    if count, err := r.blockBatchOrm.UpdateSkippedBatches(); err != nil {
        log.Error("UpdateSkippedBatches failed", "err", err)
        // continue anyway
    } else if count > 0 {
        bridgeL2BatchesSkippedTotalCounter.Inc(count)
        log.Info("Skipping batches", "count", count)
    }
    orderByList := []string{"index ASC"}
    limit := 1
    batches, err := r.batchOrm.GetBatches(r.ctx, fields, orderByList, limit)

    // batches are sorted by batch index in increasing order
    batchHashes, err := r.blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupCommitted, 1)
    if err != nil {
        log.Error("Failed to fetch committed L2 batches", "err", err)
        return
    }
    if len(batches) != 1 {
        log.Warn("Unexpected result for GetBlockBatches", "number of batches", len(batches))
    if len(batchHashes) == 0 {
        return
    }
    hash := batchHashes[0]
    // @todo add support to relay multiple batches

    batches, err := r.blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": hash}, nil, 1)
    if err != nil {
        log.Error("Failed to fetch committed L2 batch", "hash", hash, "err", err)
        return
    }
    if len(batches) == 0 {
        log.Error("Unexpected result for GetBlockBatches", "hash", hash, "len", 0)
        return
    }

    batch := batches[0]
    hash := batch.Hash
    status := types.ProvingStatus(batch.ProvingStatus)
    switch status {
    case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
@@ -420,51 +290,77 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
        // It's an intermediate state. The roller manager received the proof but has not verified
        // the proof yet. We don't roll up the proof until it's verified.
        return
    case types.ProvingTaskFailed, types.ProvingTaskSkipped:
        // note: this is covered by UpdateSkippedBatches, but we keep it for the sake of completeness
        if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
            log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
        }
    case types.ProvingTaskVerified:
        log.Info("Start to roll up zk proof", "hash", hash)
        success := false

        var parentBatchStateRoot string
        if batch.Index > 0 {
            var parentBatch *orm.Batch
            parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
            // handle unexpected db error
            if err != nil {
                log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
                return
        rollupStatuses := []types.RollupStatus{
            types.RollupFinalizing,
            types.RollupFinalized,
        }
        previousBatch, err := r.blockBatchOrm.GetLatestBatchByRollupStatus(rollupStatuses)
        // skip submitting proof
        if err == nil && uint64(batch.CreatedAt.Sub(previousBatch.CreatedAt).Seconds()) < r.cfg.FinalizeBatchIntervalSec {
            log.Info(
                "Not enough time passed, skipping",
                "hash", hash,
                "createdAt", batch.CreatedAt,
                "lastFinalizingHash", previousBatch.Hash,
                "lastFinalizingStatus", previousBatch.RollupStatus,
                "lastFinalizingCreatedAt", previousBatch.CreatedAt,
            )

            if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
                log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
            } else {
                success = true
            }
            parentBatchStateRoot = parentBatch.StateRoot

            return
        }

        // handle unexpected db error
        if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
            log.Error("Failed to get latest finalized batch", "err", err)
            return
        }

        defer func() {
            // TODO: need to revisit this and have a more fine-grained error handling
            if !success {
                log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
                if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
                log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "hash", hash)
                if err = r.blockBatchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizationSkipped); err != nil {
                    log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
                }
            }
        }()

        aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
        proofBuffer, icBuffer, err := r.blockBatchOrm.GetVerifiedProofAndInstanceCommitmentsByHash(hash)
        if err != nil {
            log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
            log.Warn("failed to fetch proof by hash", "hash", hash, "err", err)
            return
        }
        if proofBuffer == nil || icBuffer == nil {
            log.Warn("proof or instance not ready", "hash", hash)
            return
        }
        if len(proofBuffer)%32 != 0 {
            log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
            return
        }
        if len(icBuffer)%32 != 0 {
            log.Warn("instance buffer has wrong length", "hash", hash, "length", len(icBuffer))
            return
        }

        if err = aggProof.SanityCheck(); err != nil {
            log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
            return
        }

        data, err := r.l1RollupABI.Pack(
            "finalizeBatchWithProof",
            batch.BatchHeader,
            common.HexToHash(parentBatchStateRoot),
            common.HexToHash(batch.StateRoot),
            common.HexToHash(batch.WithdrawRoot),
            aggProof.Proof,
        )
        proof := utils.BufferToUint256Le(proofBuffer)
        instance := utils.BufferToUint256Le(icBuffer)
        data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance)
        if err != nil {
            log.Error("Pack finalizeBatchWithProof failed", "err", err)
            return
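utils.BufferToUint256Le itself is not shown in this diff; a plausible sketch of such a helper, splitting a byte buffer into 32-byte little-endian words as *big.Int (the exact signature in scroll-tech's utils may differ), which also explains the %32 length checks above:

func bufferToUint256Le(buf []byte) []*big.Int {
    words := make([]*big.Int, 0, len(buf)/32)
    for i := 0; i+32 <= len(buf); i += 32 {
        word := make([]byte, 32)
        // reverse each 32-byte chunk: little-endian bytes -> big-endian big.Int
        for j := 0; j < 32; j++ {
            word[j] = buf[i+31-j]
        }
        words = append(words, new(big.Int).SetBytes(word))
    }
    return words
}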
@@ -476,34 +372,53 @@
        finalizeTxHash := &txHash
        if err != nil {
            if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
                log.Error("finalizeBatchWithProof in layer1 failed",
                    "index", batch.Index, "hash", batch.Hash, "err", err)
                log.Error("finalizeBatchWithProof in layer1 failed", "hash", hash, "err", err)
            }
            return
        }
        bridgeL2BatchesFinalizedTotalCounter.Inc(1)
        log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String())
        log.Info("finalizeBatchWithProof in layer1", "batch_hash", hash, "tx_hash", finalizeTxHash.String())

        // record and sync with db, @todo handle db error
        err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
        err = r.blockBatchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
        if err != nil {
            log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
                "index", batch.Index, "batch hash", batch.Hash,
                "tx hash", finalizeTxHash.String(), "err", err)
            log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", hash, "err", err)
        }
        success = true
        r.processingFinalization.Store(txID, hash)

    default:
        log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
        log.Error("encounter unreachable case in ProcessCommittedBatches",
            "block_status", status,
        )
    }
}

func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
    transactionType := "Unknown"
    // check whether it is message relay transaction
    if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
        transactionType = "MessageRelay"
        var status types.MsgStatus
        if confirmation.IsSuccessful {
            status = types.MsgConfirmed
        } else {
            status = types.MsgRelayFailed
            log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
        }
        // @todo handle db error
        err := r.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), status, confirmation.TxHash.String())
        if err != nil {
            log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
        }
        bridgeL2MsgsRelayedConfirmedTotalCounter.Inc(1)
        r.processingMessage.Delete(confirmation.ID)
    }

    // check whether it is CommitBatches transaction
    if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok {
    if batchBatches, ok := r.processingBatchesCommitment.Load(confirmation.ID); ok {
        transactionType = "BatchesCommitment"
        batchHashes := batchBatches.([]string)
        var status types.RollupStatus
        if confirmation.IsSuccessful {
            status = types.RollupCommitted
@@ -511,15 +426,15 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
            status = types.RollupCommitFailed
            log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
        }
        // @todo handle db error
        err := r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
        if err != nil {
            log.Warn("UpdateCommitTxHashAndRollupStatus failed",
                "batch hash", batchHash.(string),
                "tx hash", confirmation.TxHash.String(), "err", err)
        for _, batchHash := range batchHashes {
            // @todo handle db error
            err := r.blockBatchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash, confirmation.TxHash.String(), status)
            if err != nil {
                log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_hash", batchHash, "err", err)
            }
        }
        bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1)
        r.processingCommitment.Delete(confirmation.ID)
        bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(int64(len(batchHashes)))
        r.processingBatchesCommitment.Delete(confirmation.ID)
    }

    // check whether it is proof finalization transaction
@@ -532,13 +447,10 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
        status = types.RollupFinalizeFailed
        log.Warn("transaction confirmed but failed in layer1", "confirmation", confirmation)
    }

    // @todo handle db error
    err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
    err := r.blockBatchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
    if err != nil {
        log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
            "batch hash", batchHash.(string),
            "tx hash", confirmation.TxHash.String(), "err", err)
        log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_hash", batchHash.(string), "err", err)
    }
    bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
    r.processingFinalization.Delete(confirmation.ID)
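All three branches above share one bookkeeping scheme: a sync.Map keyed by confirmation ID, written when the transaction is sent and consumed exactly once when its confirmation arrives. A minimal sketch of that lifecycle (names are illustrative):

var pending sync.Map // confirmation ID -> batch hashes

func onSend(txID string, batchHashes []string) {
    pending.Store(txID, batchHashes)
}

func onConfirm(txID string) ([]string, bool) {
    v, ok := pending.Load(txID)
    if !ok {
        return nil, false // not ours, or already handled
    }
    pending.Delete(txID) // consume the entry so it is processed only once
    return v.([]string), true
}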
@@ -558,14 +470,14 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
        case cfm := <-r.gasOracleSender.ConfirmChan():
            if !cfm.IsSuccessful {
                // @discuss: maybe make it pending again?
                err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
                err := r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
                if err != nil {
                    log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
                }
                log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
            } else {
                // @todo handle db error
                err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
                err := r.blockBatchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
                if err != nil {
                    log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
                }

@@ -2,29 +2,47 @@ package relayer

import (
    "context"
    "encoding/json"
    "errors"
    "math/big"
    "os"
    "strconv"
    "testing"

    "github.com/agiledragon/gomonkey/v2"
    "github.com/scroll-tech/go-ethereum/accounts/abi"
    "github.com/scroll-tech/go-ethereum/common"
    gethTypes "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/smartystreets/goconvey/convey"
    "github.com/stretchr/testify/assert"
    "gorm.io/gorm"

    "scroll-tech/common/database"
    "scroll-tech/common/types"
    "scroll-tech/common/types/message"
    "scroll-tech/common/utils"

    "scroll-tech/database/migrate"

    "scroll-tech/bridge/internal/controller/sender"
    "scroll-tech/bridge/internal/orm"
    "scroll-tech/bridge/internal/orm/migrate"
    bridgeTypes "scroll-tech/bridge/internal/types"
    bridgeUtils "scroll-tech/bridge/internal/utils"
)

var (
    templateL2Message = []orm.L2Message{
        {
            Nonce:      1,
            Height:     1,
            Sender:     "0x596a746661dbed76a84556111c2872249b070e15",
            Value:      "100",
            Target:     "0x2c73620b223808297ea734d946813f0dd78eb8f7",
            Calldata:   "testdata",
            Layer2Hash: "hash0",
        },
    }
)

func setupL2RelayerDB(t *testing.T) *gorm.DB {
    db, err := database.InitDB(cfg.DBConfig)
    db, err := bridgeUtils.InitDB(cfg.DBConfig)
    assert.NoError(t, err)
    sqlDB, err := db.DB()
    assert.NoError(t, err)
@@ -34,118 +52,296 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {

func testCreateNewRelayer(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer database.CloseDB(db)
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
    defer bridgeUtils.CloseDB(db)
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)
    assert.NotNil(t, relayer)
}

func testL2RelayerProcessPendingBatches(t *testing.T) {
func testL2RelayerProcessSaveEvents(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer database.CloseDB(db)

    defer bridgeUtils.CloseDB(db)
    l2Cfg := cfg.L2Config
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    l2BlockOrm := orm.NewL2Block(db)
    err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
    assert.NoError(t, err)
    chunkOrm := orm.NewChunk(db)
    dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
    assert.NoError(t, err)
    dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
    assert.NoError(t, err)
    batchOrm := orm.NewBatch(db)
    batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2})
    l2MessageOrm := orm.NewL2Message(db)
    err = l2MessageOrm.SaveL2Messages(context.Background(), templateL2Message)
    assert.NoError(t, err)

    relayer.ProcessPendingBatches()
    traces := []*bridgeTypes.WrappedBlock{
        {
            Header: &gethTypes.Header{
                Number: big.NewInt(int64(templateL2Message[0].Height)),
            },
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        },
        {
            Header: &gethTypes.Header{
                Number: big.NewInt(int64(templateL2Message[0].Height + 1)),
            },
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        },
    }

    statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
    blockTraceOrm := orm.NewBlockTrace(db)
    assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces))
    blockBatchOrm := orm.NewBlockBatch(db)
    parentBatch1 := &bridgeTypes.BatchInfo{
        Index:     0,
        Hash:      common.Hash{}.Hex(),
        StateRoot: common.Hash{}.Hex(),
    }
    batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
    batchHash := batchData1.Hash().Hex()
    err = db.Transaction(func(tx *gorm.DB) error {
        rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1)
        if dbTxErr != nil {
            return dbTxErr
        }
        if rowsAffected != 1 {
            dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
            return dbTxErr
        }
        dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, []uint64{1}, batchHash)
        if dbTxErr != nil {
            return dbTxErr
        }
        return nil
    })
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupCommitting, statuses[0])

    err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupFinalized)
    assert.NoError(t, err)

    relayer.ProcessSavedEvents()

    msg, err := l2MessageOrm.GetL2MessageByNonce(templateL2Message[0].Nonce)
    assert.NoError(t, err)
    assert.Equal(t, types.MsgSubmitted, types.MsgStatus(msg.Status))
}

func testL2RelayerProcessCommittedBatches(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer database.CloseDB(db)
    defer bridgeUtils.CloseDB(db)

    l2Cfg := cfg.L2Config
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
    assert.NoError(t, err)
    batchOrm := orm.NewBatch(db)
    batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
    assert.NoError(t, err)

    err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
    assert.NoError(t, err)

    relayer.ProcessCommittedBatches()

    statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupFinalizeFailed, statuses[0])

    err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
    assert.NoError(t, err)
    proof := &message.AggProof{
        Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
        FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
    parentBatch1 := &bridgeTypes.BatchInfo{
        Index:     0,
        Hash:      common.Hash{}.Hex(),
        StateRoot: common.Hash{}.Hex(),
    }
    err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)

    blockBatchOrm := orm.NewBlockBatch(db)
    batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
    batchHash := batchData1.Hash().Hex()
    err = db.Transaction(func(tx *gorm.DB) error {
        rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1)
        if dbTxErr != nil {
            return dbTxErr
        }
        if rowsAffected != 1 {
            dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
            return dbTxErr
        }
        return nil
    })
    assert.NoError(t, err)

    err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
    assert.NoError(t, err)

    tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
    assert.NoError(t, err)
    err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
    assert.NoError(t, err)

    relayer.ProcessCommittedBatches()
    statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})

    statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{batchHash})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupFinalizing, statuses[0])
}

func testL2RelayerRollupConfirm(t *testing.T) {
func testL2RelayerSkipBatches(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer database.CloseDB(db)
    defer bridgeUtils.CloseDB(db)

    l2Cfg := cfg.L2Config
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    blockBatchOrm := orm.NewBlockBatch(db)
    createBatch := func(rollupStatus types.RollupStatus, provingStatus types.ProvingStatus, index uint64) string {
        batchData := genBatchData(t, index)
        err = db.Transaction(func(tx *gorm.DB) error {
            rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
            if dbTxErr != nil {
                return dbTxErr
            }
            if rowsAffected != 1 {
                dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
                return dbTxErr
            }
            return nil
        })
        assert.NoError(t, err)

        batchHash := batchData.Hash().Hex()
        err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
        assert.NoError(t, err)

        tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
        tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
        err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
        assert.NoError(t, err)
        err = blockBatchOrm.UpdateProvingStatus(batchHash, provingStatus)
        assert.NoError(t, err)
        return batchHash
    }

    skipped := []string{
        createBatch(types.RollupCommitted, types.ProvingTaskSkipped, 1),
        createBatch(types.RollupCommitted, types.ProvingTaskFailed, 2),
    }

    notSkipped := []string{
        createBatch(types.RollupPending, types.ProvingTaskSkipped, 3),
        createBatch(types.RollupCommitting, types.ProvingTaskSkipped, 4),
        createBatch(types.RollupFinalizing, types.ProvingTaskSkipped, 5),
        createBatch(types.RollupFinalized, types.ProvingTaskSkipped, 6),
        createBatch(types.RollupPending, types.ProvingTaskFailed, 7),
        createBatch(types.RollupCommitting, types.ProvingTaskFailed, 8),
        createBatch(types.RollupFinalizing, types.ProvingTaskFailed, 9),
        createBatch(types.RollupFinalized, types.ProvingTaskFailed, 10),
        createBatch(types.RollupCommitted, types.ProvingTaskVerified, 11),
    }

    relayer.ProcessCommittedBatches()

    for _, id := range skipped {
        statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{id})
        assert.NoError(t, err)
        assert.Equal(t, 1, len(statuses))
        assert.Equal(t, types.RollupFinalizationSkipped, statuses[0])
    }

    for _, id := range notSkipped {
        statuses, err := blockBatchOrm.GetRollupStatusByHashList([]string{id})
        assert.NoError(t, err)
        assert.Equal(t, 1, len(statuses))
        assert.NotEqual(t, types.RollupFinalizationSkipped, statuses[0])
    }
}

func testL2RelayerMsgConfirm(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer bridgeUtils.CloseDB(db)
    l2MessageOrm := orm.NewL2Message(db)
    insertL2Messages := []orm.L2Message{
        {MsgHash: "msg-1", Nonce: 0},
        {MsgHash: "msg-2", Nonce: 1},
    }
    err := l2MessageOrm.SaveL2Messages(context.Background(), insertL2Messages)
    assert.NoError(t, err)

    // Create and set up the Layer2 Relayer.
    l2Cfg := cfg.L2Config
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
    l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    // Simulate message confirmations.
    l2Relayer.processingMessage.Store("msg-1", "msg-1")
    l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
        ID:           "msg-1",
        IsSuccessful: true,
    })
    l2Relayer.processingMessage.Store("msg-2", "msg-2")
    l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
        ID:           "msg-2",
        IsSuccessful: false,
    })

    // Check the database for the updated status using TryTimes.
    assert.True(t, utils.TryTimes(5, func() bool {
        fields1 := map[string]interface{}{"msg_hash": "msg-1"}
        msg1, err1 := l2MessageOrm.GetL2Messages(fields1, nil, 0)
        if len(msg1) != 1 {
            return false
        }
        fields2 := map[string]interface{}{"msg_hash": "msg-2"}
        msg2, err2 := l2MessageOrm.GetL2Messages(fields2, nil, 0)
        if len(msg2) != 1 {
            return false
        }
        return err1 == nil && types.MsgStatus(msg1[0].Status) == types.MsgConfirmed &&
            err2 == nil && types.MsgStatus(msg2[0].Status) == types.MsgRelayFailed
    }))
}
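utils.TryTimes polls an eventually-consistent condition instead of sleeping a fixed amount. A sketch of what such a helper plausibly looks like (the real scroll-tech/common/utils implementation may differ in retry count and interval):

func tryTimes(n int, f func() bool) bool {
    for i := 0; i < n; i++ {
        if f() {
            return true
        }
        time.Sleep(500 * time.Millisecond) // assumed backoff interval
    }
    return false
}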

func testL2RelayerRollupConfirm(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer bridgeUtils.CloseDB(db)

    // Insert test data.
    batches := make([]*bridgeTypes.BatchData, 6)
    for i := 0; i < 6; i++ {
        batches[i] = genBatchData(t, uint64(i))
    }

    blockBatchOrm := orm.NewBlockBatch(db)
    err := db.Transaction(func(tx *gorm.DB) error {
        for _, batch := range batches {
            rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batch)
            if dbTxErr != nil {
                return dbTxErr
            }
            if rowsAffected != 1 {
                dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
                return dbTxErr
            }
        }
        return nil
    })
    assert.NoError(t, err)

    // Create and set up the Layer2 Relayer.
    l2Cfg := cfg.L2Config
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    // Simulate message confirmations.
    processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"}
    isSuccessful := []bool{true, false, true, false}

    batchOrm := orm.NewBatch(db)
    batchHashes := make([]string, len(processingKeys))
    for i := range batchHashes {
        batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
        assert.NoError(t, err)
        batchHashes[i] = batch.Hash
    }

    for i, key := range processingKeys[:2] {
        l2Relayer.processingCommitment.Store(key, batchHashes[i])
        l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
        batchHashes := []string{batches[i*2].Hash().Hex(), batches[i*2+1].Hash().Hex()}
        l2Relayer.processingBatchesCommitment.Store(key, batchHashes)
        l2Relayer.messageSender.SendConfirmation(&sender.Confirmation{
            ID:           key,
            IsSuccessful: isSuccessful[i],
            TxHash:       common.HexToHash("0x123456789abcdef"),
        })
    }

    for i, key := range processingKeys[2:] {
        l2Relayer.processingFinalization.Store(key, batchHashes[i+2])
        batchHash := batches[i+4].Hash().Hex()
        l2Relayer.processingFinalization.Store(key, batchHash)
        l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
            ID:           key,
            IsSuccessful: isSuccessful[i+2],
            TxHash:       common.HexToHash("0x123456789abcdef"),
            TxHash:       common.HexToHash("0x56789abcdef1234"),
        })
    }

@@ -153,13 +349,15 @@ func testL2RelayerRollupConfirm(t *testing.T) {
    ok := utils.TryTimes(5, func() bool {
        expectedStatuses := []types.RollupStatus{
            types.RollupCommitted,
            types.RollupCommitted,
            types.RollupCommitFailed,
            types.RollupCommitFailed,
            types.RollupFinalized,
            types.RollupFinalizeFailed,
        }

        for i, batchHash := range batchHashes {
            batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0)
        for i, batch := range batches[:6] {
            batchInDB, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}, nil, 0)
            if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
                return false
            }
@@ -171,44 +369,51 @@ func testL2RelayerRollupConfirm(t *testing.T) {

func testL2RelayerGasOracleConfirm(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer database.CloseDB(db)
    defer bridgeUtils.CloseDB(db)

    batchOrm := orm.NewBatch(db)
    batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
    assert.NoError(t, err)
    // Insert test data.
    batches := make([]*bridgeTypes.BatchData, 2)
    for i := 0; i < 2; i++ {
        batches[i] = genBatchData(t, uint64(i))
    }

    batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
    blockBatchOrm := orm.NewBlockBatch(db)
    err := db.Transaction(func(tx *gorm.DB) error {
        for _, batch := range batches {
            rowsAffected, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batch)
            if dbTxErr != nil {
                return dbTxErr
            }
            if rowsAffected != 1 {
                dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
                return dbTxErr
            }
        }
        return nil
    })
    assert.NoError(t, err)

    // Create and set up the Layer2 Relayer.
    l2Cfg := cfg.L2Config
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
    l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    // Simulate message confirmations.
    type BatchConfirmation struct {
        batchHash    string
        isSuccessful bool
    }

    confirmations := []BatchConfirmation{
        {batchHash: batch1.Hash, isSuccessful: true},
        {batchHash: batch2.Hash, isSuccessful: false},
    }

    for _, confirmation := range confirmations {
    isSuccessful := []bool{true, false}
    for i, batch := range batches {
        l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
            ID:           confirmation.batchHash,
            IsSuccessful: confirmation.isSuccessful,
            ID:           batch.Hash().Hex(),
            IsSuccessful: isSuccessful[i],
        })
    }

    // Check the database for the updated status using TryTimes.
    ok := utils.TryTimes(5, func() bool {
        expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
        for i, confirmation := range confirmations {
            gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0)
        for i, batch := range batches {
            gasOracle, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batch.Hash().Hex()}, nil, 0)
            if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
                return false
            }
@@ -218,27 +423,42 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
    assert.True(t, ok)
}

func genBatchData(t *testing.T, index uint64) *bridgeTypes.BatchData {
    templateBlockTrace, err := os.ReadFile("../../../testdata/blockTrace_02.json")
    assert.NoError(t, err)
    // unmarshal blockTrace
    wrappedBlock := &bridgeTypes.WrappedBlock{}
    err = json.Unmarshal(templateBlockTrace, wrappedBlock)
    assert.NoError(t, err)
    wrappedBlock.Header.ParentHash = common.HexToHash("0x" + strconv.FormatUint(index+1, 16))
    parentBatch := &bridgeTypes.BatchInfo{
        Index: index,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    return bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{wrappedBlock}, nil)
}

func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
    db := setupL2RelayerDB(t)
    defer database.CloseDB(db)
    defer bridgeUtils.CloseDB(db)

    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
    relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)
    assert.NotNil(t, relayer)

    var batchOrm *orm.Batch
    var blockBatchOrm *orm.BlockBatch
    convey.Convey("Failed to GetLatestBatch", t, func() {
        targetErr := errors.New("GetLatestBatch error")
        patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
        patchGuard := gomonkey.ApplyMethodFunc(blockBatchOrm, "GetLatestBatch", func() (*orm.BlockBatch, error) {
            return nil, targetErr
        })
        defer patchGuard.Reset()
        relayer.ProcessGasPriceOracle()
    })

    patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
        batch := orm.Batch{
            OracleStatus: int16(types.GasOraclePending),
    patchGuard := gomonkey.ApplyMethodFunc(blockBatchOrm, "GetLatestBatch", func() (*orm.BlockBatch, error) {
        batch := orm.BlockBatch{
            OracleStatus: int(types.GasOraclePending),
            Hash:         "0x0000000000000000000000000000000000000000",
        }
        return &batch, nil
@@ -283,14 +503,97 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {

    convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() {
        targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error")
        patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
        patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
            return targetErr
        })
        relayer.ProcessGasPriceOracle()
    })

    patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
    patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
        return nil
    })
    relayer.ProcessGasPriceOracle()
}
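The gomonkey calls above patch methods at runtime so the test can force each ORM failure path without touching a real database. The pattern in isolation (a typed nil receiver is enough, since gomonkey rewrites the method of the type, not of the value):

var bb *orm.BlockBatch // typed nil; only the method table matters
patch := gomonkey.ApplyMethodFunc(bb, "GetLatestBatch", func() (*orm.BlockBatch, error) {
    return nil, errors.New("stubbed failure")
})
defer patch.Reset() // always undo the patch so later tests see the real method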
|
||||
|
||||
func testLayer2RelayerSendCommitTx(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer bridgeUtils.CloseDB(db)
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
|
||||
var batchDataList []*bridgeTypes.BatchData
|
||||
convey.Convey("SendCommitTx receives empty batch", t, func() {
|
||||
err = relayer.SendCommitTx(batchDataList)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
parentBatch := &bridgeTypes.BatchInfo{
|
||||
Index: 0,
|
||||
Hash: "0x0000000000000000000000000000000000000000",
|
||||
}
|
||||
|
||||
traces := []*bridgeTypes.WrappedBlock{
|
||||
{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(1000),
|
||||
ParentHash: common.Hash{},
|
||||
Difficulty: big.NewInt(0),
|
||||
BaseFee: big.NewInt(0),
|
||||
},
|
||||
Transactions: nil,
|
||||
WithdrawTrieRoot: common.Hash{},
|
||||
},
|
||||
}
|
||||
|
||||
blocks := []*bridgeTypes.WrappedBlock{traces[0]}
|
||||
tmpBatchData := bridgeTypes.NewBatchData(parentBatch, blocks, cfg.L2Config.BatchProposerConfig.PublicInputConfig)
|
||||
batchDataList = append(batchDataList, tmpBatchData)
|
||||
|
||||
var s abi.ABI
|
||||
convey.Convey("Failed to pack commitBatches", t, func() {
|
||||
targetErr := errors.New("commitBatches error")
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s, "Pack", func(name string, args ...interface{}) ([]byte, error) {
|
||||
return nil, targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
err = relayer.SendCommitTx(batchDataList)
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
})
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s, "Pack", func(name string, args ...interface{}) ([]byte, error) {
|
||||
return nil, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
convey.Convey("Failed to send commitBatches tx to layer1", t, func() {
|
||||
targetErr := errors.New("SendTransaction failure")
|
||||
patchGuard.ApplyMethodFunc(relayer.rollupSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
|
||||
return common.Hash{}, targetErr
|
||||
})
|
||||
err = relayer.SendCommitTx(batchDataList)
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(relayer.rollupSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
|
||||
return common.HexToHash("0x56789abcdef1234"), nil
|
||||
})
|
||||
|
||||
var blockBatchOrm *orm.BlockBatch
|
||||
convey.Convey("UpdateCommitTxHashAndRollupStatus failed", t, func() {
|
||||
targetErr := errors.New("UpdateCommitTxHashAndRollupStatus failure")
|
||||
patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
|
||||
return targetErr
|
||||
})
|
||||
err = relayer.SendCommitTx(batchDataList)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
|
||||
return nil
|
||||
})
|
||||
err = relayer.SendCommitTx(batchDataList)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -5,15 +5,14 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/bridge/internal/config"
|
||||
bridgeTypes "scroll-tech/bridge/internal/types"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -25,20 +24,17 @@ var (
|
||||
// l2geth client
|
||||
l2Cli *ethclient.Client
|
||||
|
||||
// l2 block
|
||||
wrappedBlock1 *types.WrappedBlock
|
||||
wrappedBlock2 *types.WrappedBlock
|
||||
// block trace
|
||||
wrappedBlock1 *bridgeTypes.WrappedBlock
|
||||
wrappedBlock2 *bridgeTypes.WrappedBlock
|
||||
|
||||
// chunk
|
||||
chunk1 *types.Chunk
|
||||
chunk2 *types.Chunk
|
||||
chunkHash1 common.Hash
|
||||
chunkHash2 common.Hash
|
||||
// batch data
|
||||
batchData1 *bridgeTypes.BatchData
|
||||
batchData2 *bridgeTypes.BatchData
|
||||
)
|
||||
|
||||
func setupEnv(t *testing.T) {
|
||||
func setupEnv(t *testing.T) (err error) {
|
||||
// Load config.
|
||||
var err error
|
||||
cfg, err = config.NewConfig("../../../conf/config.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -46,7 +42,7 @@ func setupEnv(t *testing.T) {
|
||||
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
|
||||
cfg.DBConfig = &database.Config{
|
||||
cfg.DBConfig = &config.DBConfig{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
@@ -58,22 +54,40 @@ func setupEnv(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
wrappedBlock1 = &types.WrappedBlock{}
|
||||
err = json.Unmarshal(templateBlockTrace1, wrappedBlock1)
|
||||
assert.NoError(t, err)
|
||||
chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
|
||||
chunkHash1, err = chunk1.Hash(0)
|
||||
assert.NoError(t, err)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// unmarshal blockTrace
|
||||
wrappedBlock1 = &bridgeTypes.WrappedBlock{}
|
||||
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
|
||||
return err
|
||||
}
|
||||
parentBatch1 := &bridgeTypes.BatchInfo{
|
||||
Index: 0,
|
||||
Hash: "0x0cc6b102c2924402c14b2e3a19baccc316252bfdc44d9ec62e942d34e39ec729",
|
||||
StateRoot: "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
|
||||
}
|
||||
batchData1 = bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)
|
||||
|
||||
templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
|
||||
assert.NoError(t, err)
|
||||
wrappedBlock2 = &types.WrappedBlock{}
|
||||
err = json.Unmarshal(templateBlockTrace2, wrappedBlock2)
|
||||
assert.NoError(t, err)
|
||||
chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
|
||||
chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, err)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// unmarshal blockTrace
|
||||
wrappedBlock2 = &bridgeTypes.WrappedBlock{}
|
||||
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
|
||||
return err
|
||||
}
|
||||
parentBatch2 := &bridgeTypes.BatchInfo{
|
||||
Index: batchData1.Batch.BatchIndex,
|
||||
Hash: batchData1.Hash().Hex(),
|
||||
StateRoot: batchData1.Batch.NewStateRoot.String(),
|
||||
}
|
||||
batchData2 = bridgeTypes.NewBatchData(parentBatch2, []*bridgeTypes.WrappedBlock{wrappedBlock2}, nil)
|
||||
|
||||
log.Info("batchHash", "batchhash1", batchData1.Hash().Hex(), "batchhash2", batchData2.Hash().Hex())
|
||||
|
||||
return err
|
||||
}

func TestMain(m *testing.M) {
@@ -85,7 +99,9 @@ func TestMain(m *testing.M) {
}

func TestFunctions(t *testing.T) {
    setupEnv(t)
    if err := setupEnv(t); err != nil {
        t.Fatal(err)
    }
    // Run l1 relayer test cases.
    t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
    t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents)
@@ -95,9 +111,12 @@ func TestFunctions(t *testing.T) {

    // Run l2 relayer test cases.
    t.Run("TestCreateNewRelayer", testCreateNewRelayer)
    t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
    t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
    t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
    t.Run("TestL2RelayerSkipBatches", testL2RelayerSkipBatches)
    t.Run("TestL2RelayerMsgConfirm", testL2RelayerMsgConfirm)
    t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
    t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
    t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
    t.Run("TestLayer2RelayerSendCommitTx", testLayer2RelayerSendCommitTx)
}

@@ -343,20 +343,6 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
    if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
        gasTipCap = feeData.gasTipCap
    }

    // adjust for rising basefee
    adjBaseFee := big.NewInt(0)
    if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
        adjBaseFee.SetUint64(feeGas)
    }
    adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
    adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
    currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee)
    if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
        gasFeeCap = currentGasFeeCap
    }

    // but don't exceed maxGasPrice
    if gasFeeCap.Cmp(maxGasPrice) > 0 {
        gasFeeCap = maxGasPrice
    }
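    // Net effect of the block above, as a sketch (escalateMultipleNum/Den come
    // from the sender config; 11/10 here is only an illustrative value):
    //
    //	gasFeeCap = min(maxGasPrice, gasTipCap + baseFeePerGas*num/den)
    //
    // e.g. a 10 gwei base fee with an 11/10 multiple gives adjBaseFee = 11 gwei,
    // so a 2 gwei tip yields a 13 gwei fee cap before the maxGasPrice clamp.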

@@ -65,7 +65,6 @@ func TestSender(t *testing.T) {

    t.Run("test min gas limit", testMinGasLimit)
    t.Run("test resubmit transaction", testResubmitTransaction)
    t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
    t.Run("test check pending transaction", testCheckPendingTransaction)

    t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
@@ -155,43 +154,6 @@ func testResubmitTransaction(t *testing.T) {
    }
}

func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
    txType := "DynamicFeeTx"

    cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
    cfgCopy.TxType = txType
    s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
    assert.NoError(t, err)
    auth := s.auths.getAccount()
    tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
    s.baseFeePerGas = 1000
    feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
    assert.NoError(t, err)
    // bump the basefee by 10x
    s.baseFeePerGas *= 10
    // resubmit and check that the gas fee has been adjusted accordingly
    newTx, err := s.resubmitTransaction(feeData, auth, tx)
    assert.NoError(t, err)

    escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
    escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
    maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)

    adjBaseFee := new(big.Int)
    adjBaseFee.SetUint64(s.baseFeePerGas)
    adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
    adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)

    expectedGasFeeCap := new(big.Int).Add(feeData.gasTipCap, adjBaseFee)
    if expectedGasFeeCap.Cmp(maxGasPrice) > 0 {
        expectedGasFeeCap = maxGasPrice
    }

    assert.Equal(t, expectedGasFeeCap.Int64(), newTx.GasFeeCap().Int64())

    s.Stop()
}
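// The test mirrors the sender's own arithmetic: with the base fee bumped from
// 1000 to 10000 wei, the expected cap is gasTipCap + 10000*num/den, clamped to
// MaxGasPrice, so the assertion fails if resubmitTransaction ever stops
// tracking the live base fee.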

func testCheckPendingTransaction(t *testing.T) {
    for _, txType := range txTypes {
        cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig

@@ -3,169 +3,391 @@ package watcher

import (
    "context"
    "fmt"
    "math"
    "sync"
    "time"

    "github.com/scroll-tech/go-ethereum/log"
    gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
    "gorm.io/gorm"

    "scroll-tech/common/metrics"
    "scroll-tech/common/types"

    bridgeAbi "scroll-tech/bridge/abi"
    "scroll-tech/bridge/internal/config"
    "scroll-tech/bridge/internal/controller/relayer"
    "scroll-tech/bridge/internal/orm"
    bridgeTypes "scroll-tech/bridge/internal/types"
)

// BatchProposer proposes batches based on available unbatched chunks.
var (
    bridgeL2BatchesGasOverThresholdTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/gas/over/threshold/total", metrics.ScrollRegistry)
    bridgeL2BatchesTxsOverThresholdTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/txs/over/threshold/total", metrics.ScrollRegistry)
    bridgeL2BatchesBlocksCreatedTotalCounter    = gethMetrics.NewRegisteredCounter("bridge/l2/batches/blocks/created/total", metrics.ScrollRegistry)
    bridgeL2BatchesCommitsSentTotalCounter      = gethMetrics.NewRegisteredCounter("bridge/l2/batches/commits/sent/total", metrics.ScrollRegistry)
    bridgeL2BatchesOversizedTotalCounter        = gethMetrics.NewRegisteredCounter("bridge/l2/batches/oversized/total", metrics.ScrollRegistry)
    bridgeL2BatchesTxsCreatedPerBatchGauge      = gethMetrics.NewRegisteredGauge("bridge/l2/batches/txs/created/per/batch", metrics.ScrollRegistry)
    bridgeL2BatchesGasCreatedPerBatchGauge      = gethMetrics.NewRegisteredGauge("bridge/l2/batches/gas/created/per/batch", metrics.ScrollRegistry)
    bridgeL2BatchesPayloadSizePerBatchGauge     = gethMetrics.NewRegisteredGauge("bridge/l2/batches/payload/size/per/batch", metrics.ScrollRegistry)
)

// BatchProposer sends batches commit transactions to relayer.
type BatchProposer struct {
    ctx context.Context
    db  *gorm.DB
    mutex sync.Mutex
    ctx   context.Context
    db    *gorm.DB

    batchOrm   *orm.Batch
    chunkOrm   *orm.Chunk
    l2BlockOrm *orm.L2Block
    batchTimeSec             uint64
    batchGasThreshold        uint64
    batchTxNumThreshold      uint64
    batchBlocksLimit         uint64
    batchCommitTimeSec       uint64
    commitCalldataSizeLimit  uint64
    batchDataBufferSizeLimit uint64
    commitCalldataMinSize    uint64

    maxChunkNumPerBatch             uint64
    maxL1CommitGasPerBatch          uint64
    maxL1CommitCalldataSizePerBatch uint32
    minChunkNumPerBatch             uint64
    batchTimeoutSec                 uint64
    proofGenerationFreq uint64
    batchDataBuffer     []*bridgeTypes.BatchData
    relayer             *relayer.Layer2Relayer

    blockBatchOrm *orm.BlockBatch
    blockTraceOrm *orm.BlockTrace

    piCfg *bridgeTypes.PublicInputHashConfig
}

// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
    return &BatchProposer{
        ctx:                             ctx,
        db:                              db,
        batchOrm:                        orm.NewBatch(db),
        chunkOrm:                        orm.NewChunk(db),
        l2BlockOrm:                      orm.NewL2Block(db),
        maxChunkNumPerBatch:             cfg.MaxChunkNumPerBatch,
        maxL1CommitGasPerBatch:          cfg.MaxL1CommitGasPerBatch,
        maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
        minChunkNumPerBatch:             cfg.MinChunkNumPerBatch,
        batchTimeoutSec:                 cfg.BatchTimeoutSec,
// NewBatchProposer will return a new instance of BatchProposer.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, relayer *relayer.Layer2Relayer, db *gorm.DB) *BatchProposer {
    p := &BatchProposer{
        mutex:                    sync.Mutex{},
        ctx:                      ctx,
        db:                       db,
        blockBatchOrm:            orm.NewBlockBatch(db),
        blockTraceOrm:            orm.NewBlockTrace(db),
        batchTimeSec:             cfg.BatchTimeSec,
        batchGasThreshold:        cfg.BatchGasThreshold,
        batchTxNumThreshold:      cfg.BatchTxNumThreshold,
        batchBlocksLimit:         cfg.BatchBlocksLimit,
        batchCommitTimeSec:       cfg.BatchCommitTimeSec,
        commitCalldataSizeLimit:  cfg.CommitTxCalldataSizeLimit,
        commitCalldataMinSize:    cfg.CommitTxCalldataMinSize,
        batchDataBufferSizeLimit: 100*cfg.CommitTxCalldataSizeLimit + 1*1024*1024, // @todo: determine the value.
        proofGenerationFreq:      cfg.ProofGenerationFreq,
        piCfg:                    cfg.PublicInputConfig,
        relayer:                  relayer,
    }

    // for graceful restart.
    p.recoverBatchDataBuffer()

    // try to commit the leftover pending batches
    p.TryCommitBatches()

    return p
}
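// A minimal sketch of how a caller might drive the proposer (assumed wiring
// for illustration only; the actual loop lives elsewhere in the bridge):
//
//	ticker := time.NewTicker(2 * time.Second)
//	for range ticker.C {
//		p.TryProposeBatch()  // fill batchDataBuffer from unbatched blocks
//		p.TryCommitBatches() // flush the buffer once it is large or old enough
//	}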

// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
    dbChunks, err := p.proposeBatchChunks()
func (p *BatchProposer) recoverBatchDataBuffer() {
    // batches are sorted by batch index in increasing order
    batchHashes, err := p.blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32)
    if err != nil {
        log.Error("proposeBatchChunks failed", "err", err)
        log.Crit("Failed to fetch pending L2 batches", "err", err)
    }
    if len(batchHashes) == 0 {
        return
    }
    if err := p.updateBatchInfoInDB(dbChunks); err != nil {
        log.Error("update batch info in db failed", "err", err)
    log.Info("Load pending batches into batchDataBuffer")

    // helper function to cache and get BlockBatch from DB
    blockBatchCache := make(map[string]orm.BlockBatch)
    getBlockBatch := func(batchHash string) (*orm.BlockBatch, error) {
        if blockBatch, ok := blockBatchCache[batchHash]; ok {
            return &blockBatch, nil
        }
        blockBatches, err := p.blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchHash}, nil, 0)
        if err != nil || len(blockBatches) == 0 {
            return nil, err
        }
        blockBatchCache[batchHash] = blockBatches[0]
        return &blockBatches[0], nil
    }

    // recover the in-memory batchData from DB
    for _, batchHash := range batchHashes {
        log.Info("recover batch data from pending batch", "batch_hash", batchHash)
        blockBatch, err := getBlockBatch(batchHash)
        if err != nil {
            log.Error("could not get BlockBatch", "batch_hash", batchHash, "error", err)
            continue
        }

        parentBatch, err := getBlockBatch(blockBatch.ParentHash)
        if err != nil {
            log.Error("could not get parent BlockBatch", "batch_hash", batchHash, "error", err)
            continue
        }

        whereFields := map[string]interface{}{
            "batch_hash": batchHash,
        }
        orderByList := []string{
            "number ASC",
        }

        blockTraces, err := p.blockTraceOrm.GetL2BlockInfos(whereFields, orderByList, 0)
        if err != nil {
            log.Error("could not GetL2BlockInfos", "batch_hash", batchHash, "error", err)
            continue
        }
        if len(blockTraces) != int(blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1) {
            log.Error("the number of block infos retrieved from DB mismatches the batch info in the DB",
                "len(blockInfos)", len(blockTraces),
                "expected", blockBatch.EndBlockNumber-blockBatch.StartBlockNumber+1)
            continue
        }

        batchData, err := p.generateBatchData(parentBatch, blockTraces)
        if err != nil {
            continue
        }
        if batchData.Hash().Hex() != batchHash {
            log.Error("the hash from recovered batch data mismatches the DB entry",
                "recovered_batch_hash", batchData.Hash().Hex(),
                "expected", batchHash)
            continue
        }

        p.batchDataBuffer = append(p.batchDataBuffer, batchData)
    }
}
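// The recovery invariant here: a pending batch is re-buffered only if
// rebuilding it from the stored traces reproduces the exact hash recorded in
// the DB; any mismatch (missing parent, wrong block count, different hash) is
// logged and skipped rather than committed with a wrong hash.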

func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
    numChunks := len(dbChunks)
    if numChunks <= 0 {
        return nil
// TryProposeBatch will try to propose a batch.
func (p *BatchProposer) TryProposeBatch() {
    p.mutex.Lock()
    defer p.mutex.Unlock()

    for p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit {
        orderBy := []string{"number ASC"}
        blockTraces, err := p.blockTraceOrm.GetUnbatchedL2Blocks(map[string]interface{}{}, orderBy, int(p.batchBlocksLimit))
        if err != nil {
            log.Error("failed to get unbatched blocks", "err", err)
            return
        }

        batchCreated := p.proposeBatch(blockTraces)

        // while the size of batchDataBuffer < commitCalldataMinSize,
        // the proposer keeps fetching and proposing batches.
        if p.getBatchDataBufferSize() >= p.commitCalldataMinSize {
            return
        }

        if !batchCreated {
            // wait for watcher to insert l2 traces.
            time.Sleep(time.Second)
        }
    }
    chunks, err := p.dbChunksToBridgeChunks(dbChunks)
}

// TryCommitBatches will try to commit the pending batches.
func (p *BatchProposer) TryCommitBatches() {
    p.mutex.Lock()
    defer p.mutex.Unlock()

    if len(p.batchDataBuffer) == 0 {
        return
    }

    // estimate the calldata length to determine whether to commit the pending batches
    index := 0
    commit := false
    calldataByteLen := uint64(0)
    for ; index < len(p.batchDataBuffer); index++ {
        calldataByteLen += bridgeAbi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch)
        if calldataByteLen > p.commitCalldataSizeLimit {
            commit = true
            if index == 0 {
                log.Warn(
                    "The calldata size of one batch is larger than the threshold",
                    "batch_hash", p.batchDataBuffer[0].Hash().Hex(),
                    "calldata_size", calldataByteLen,
                )
                index = 1
            }
            break
        }
    }
    if !commit && p.batchDataBuffer[0].Timestamp()+p.batchCommitTimeSec > uint64(time.Now().Unix()) {
        return
    }

    // Send commit tx for batchDataBuffer[0:index]
    log.Info("Commit batches", "start_index", p.batchDataBuffer[0].Batch.BatchIndex,
        "end_index", p.batchDataBuffer[index-1].Batch.BatchIndex)
    err := p.relayer.SendCommitTx(p.batchDataBuffer[:index])
    if err != nil {
        // leave the retry to the next ticker
        log.Error("SendCommitTx failed", "error", err)
    } else {
        // pop the processed batches from the buffer
        bridgeL2BatchesCommitsSentTotalCounter.Inc(1)
        p.batchDataBuffer = p.batchDataBuffer[index:]
    }
}
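// Worked example of the decision above (using the 500-byte limit from the
// test file): buffered batches of 200, 250 and 300 bytes exceed 500 at the
// third batch, so the first two are committed; a single 600-byte batch is
// committed alone after the warning; and if the limit is never crossed, the
// commit waits until the oldest batch is batchCommitTimeSec old.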

func (p *BatchProposer) proposeBatch(blockTraces []orm.BlockTrace) bool {
    if len(blockTraces) == 0 {
        return false
    }

    approximatePayloadSize := func(hash string) (uint64, error) {
        traces, err := p.blockTraceOrm.GetL2WrappedBlocks(map[string]interface{}{"hash": hash})
        if err != nil {
            return 0, err
        }
        if len(traces) != 1 {
            return 0, fmt.Errorf("unexpected traces length, expected = 1, actual = %d", len(traces))
        }
        size := 0
        for _, tx := range traces[0].Transactions {
            size += len(tx.Data)
        }
        return uint64(size), nil
    }

    firstSize, err := approximatePayloadSize(blockTraces[0].Hash)
    if err != nil {
        log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
        return false
    }

    if firstSize > p.commitCalldataSizeLimit {
        log.Warn("oversized payload even for only 1 block", "height", blockTraces[0].Number, "size", firstSize)
        // note: we should probably fail here once we can ensure this will not happen
        if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
            log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
            return false
        }
        bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
        bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
        bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize))
        bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
        bridgeL2BatchesOversizedTotalCounter.Inc(1)
        return true
    }

    if blockTraces[0].GasUsed > p.batchGasThreshold {
        bridgeL2BatchesGasOverThresholdTotalCounter.Inc(1)
        log.Warn("gas overflow even for only 1 block", "height", blockTraces[0].Number, "gas", blockTraces[0].GasUsed)
        if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
            log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
        } else {
            bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
            bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
            bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize))
            bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
        }
        return true
    }

    if blockTraces[0].TxNum > p.batchTxNumThreshold {
        bridgeL2BatchesTxsOverThresholdTotalCounter.Inc(1)
        log.Warn("too many txs even for only 1 block", "height", blockTraces[0].Number, "tx_num", blockTraces[0].TxNum)
        if err := p.createBatchForBlocks(blockTraces[:1]); err != nil {
            log.Error("failed to create batch", "number", blockTraces[0].Number, "err", err)
        } else {
            bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(blockTraces[0].TxNum))
            bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(blockTraces[0].GasUsed))
            bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(firstSize))
            bridgeL2BatchesBlocksCreatedTotalCounter.Inc(1)
        }
        return true
    }

    var gasUsed, txNum, payloadSize uint64
    reachThreshold := false
    // add blocks into the batch until one of the thresholds is reached
    for i, block := range blockTraces {
        size, err := approximatePayloadSize(block.Hash)
        if err != nil {
            log.Error("failed to create batch", "number", block.Number, "err", err)
            return false
        }

        if (gasUsed+block.GasUsed > p.batchGasThreshold) || (txNum+block.TxNum > p.batchTxNumThreshold) || (payloadSize+size > p.commitCalldataSizeLimit) {
            blockTraces = blockTraces[:i]
            reachThreshold = true
            break
        }
        gasUsed += block.GasUsed
        txNum += block.TxNum
        payloadSize += size
    }

    // If too little gas has been gathered, we don't want to halt; instead we
    // check the first block in the batch: if it's not old enough we skip
    // proposing the batch, otherwise we still propose one.
    if !reachThreshold && blockTraces[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
        return false
    }

    if err := p.createBatchForBlocks(blockTraces); err != nil {
        log.Error("failed to create batch", "from", blockTraces[0].Number, "to", blockTraces[len(blockTraces)-1].Number, "err", err)
    } else {
        bridgeL2BatchesTxsCreatedPerBatchGauge.Update(int64(txNum))
        bridgeL2BatchesGasCreatedPerBatchGauge.Update(int64(gasUsed))
        bridgeL2BatchesPayloadSizePerBatchGauge.Update(int64(payloadSize))
        bridgeL2BatchesBlocksCreatedTotalCounter.Inc(int64(len(blockTraces)))
    }

    return true
}
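// Threshold walk with the values used in the test file (batchGasThreshold
// 1000, batchTxNumThreshold 10, batchTimeSec 300): blocks with gas 100 and
// 200 carrying 1 and 2 txs stay under both thresholds, so a batch is proposed
// only once the first block is older than 300s; appending a block with 11 txs
// trips the tx threshold and immediately proposes the first two blocks.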

func (p *BatchProposer) createBatchForBlocks(blocks []orm.BlockTrace) error {
    lastBatch, err := p.blockBatchOrm.GetLatestBatch()
    if err != nil {
        // We should not receive sql.ErrNoRows error. The DB should have the batch entry that contains the genesis block.
        return err
    }

    startChunkIndex := dbChunks[0].Index
    startChunkHash := dbChunks[0].Hash
    endChunkIndex := dbChunks[numChunks-1].Index
    endChunkHash := dbChunks[numChunks-1].Hash
    err = p.db.Transaction(func(dbTX *gorm.DB) error {
        batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
        if dbErr != nil {
            return dbErr
        }
        dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
        if dbErr != nil {
            return dbErr
        }
        return nil
    })
    return err
}

func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
    dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx)
    batchData, err := p.generateBatchData(lastBatch, blocks)
    if err != nil {
        return nil, err
        log.Error("createBatchData failed", "error", err)
        return err
    }

    if len(dbChunks) == 0 {
        return nil, nil
    if err := orm.AddBatchInfoToDB(p.db, batchData); err != nil {
        log.Error("addBatchInfoToDB failed", "BatchHash", batchData.Hash(), "error", err)
        return err
    }

    firstChunk := dbChunks[0]
    totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
    totalL1CommitGas := firstChunk.TotalL1CommitGas
    var totalChunks uint64 = 1

    // Check if the first chunk breaks hard limits.
    // If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
    if totalL1CommitGas > p.maxL1CommitGasPerBatch {
        return nil, fmt.Errorf(
            "the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
            firstChunk.StartBlockNumber,
            firstChunk.EndBlockNumber,
            totalL1CommitGas,
            p.maxL1CommitGasPerBatch,
        )
    }

    if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
        return nil, fmt.Errorf(
            "the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
            firstChunk.StartBlockNumber,
            firstChunk.EndBlockNumber,
            totalL1CommitCalldataSize,
            p.maxL1CommitCalldataSizePerBatch,
        )
    }

    for i, chunk := range dbChunks[1:] {
        totalChunks++
        totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
        totalL1CommitGas += chunk.TotalL1CommitGas
        if totalChunks > p.maxChunkNumPerBatch ||
            totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
            totalL1CommitGas > p.maxL1CommitGasPerBatch {
            return dbChunks[:i+1], nil
        }
    }

    var hasChunkTimeout bool
    currentTimeSec := uint64(time.Now().Unix())
    if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
        log.Warn("first block timeout",
            "start block number", dbChunks[0].StartBlockNumber,
            "first block timestamp", dbChunks[0].StartBlockTime,
            "chunk outdated time threshold", currentTimeSec,
        )
        hasChunkTimeout = true
    }

    if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
        log.Warn("The chunk number of the batch is less than the minimum limit",
            "chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
        )
        return nil, nil
    }
    return dbChunks, nil
    p.batchDataBuffer = append(p.batchDataBuffer, batchData)
    return nil
}

func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
    chunks := make([]*types.Chunk, len(dbChunks))
    for i, c := range dbChunks {
        wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
        if err != nil {
            log.Error("Failed to fetch wrapped blocks",
                "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
func (p *BatchProposer) generateBatchData(parentBatch *orm.BlockBatch, blocks []orm.BlockTrace) (*bridgeTypes.BatchData, error) {
    var wrappedBlocks []*bridgeTypes.WrappedBlock
    for _, block := range blocks {
        trs, err := p.blockTraceOrm.GetL2WrappedBlocks(map[string]interface{}{"hash": block.Hash})
        if err != nil || len(trs) != 1 {
            log.Error("Failed to GetBlockTraces", "hash", block.Hash, "err", err)
            return nil, err
        }
        chunks[i] = &types.Chunk{
            Blocks: wrappedBlocks,
        }

        wrappedBlocks = append(wrappedBlocks, trs[0])
    }
    return chunks, nil

    parentBatchInfo := bridgeTypes.BatchInfo{
        Index:     parentBatch.Index,
        Hash:      parentBatch.Hash,
        StateRoot: parentBatch.StateRoot,
    }
    return bridgeTypes.NewBatchData(&parentBatchInfo, wrappedBlocks, p.piCfg), nil
}

func (p *BatchProposer) getBatchDataBufferSize() (size uint64) {
    for _, batchData := range p.batchDataBuffer {
        size += bridgeAbi.GetBatchCalldataLength(&batchData.Batch)
    }
    return
}
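// TryProposeBatch gates on this size: it keeps proposing while the buffered
// calldata stays below batchDataBufferSizeLimit (100x the commit calldata
// size limit plus 1 MiB, per the constructor above) and returns early once
// the buffer reaches commitCalldataMinSize, i.e. enough pending calldata for
// a worthwhile commit transaction.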

@@ -2,70 +2,205 @@ package watcher

import (
    "context"
    "math"
    "strings"
    "testing"
    "time"

    "github.com/agiledragon/gomonkey/v2"
    "github.com/scroll-tech/go-ethereum/common"
    gethTypes "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"
    "gorm.io/gorm"

    "scroll-tech/common/database"
    "scroll-tech/common/types"

    "scroll-tech/bridge/internal/config"
    "scroll-tech/bridge/internal/controller/relayer"
    "scroll-tech/bridge/internal/orm"
    bridgeTypes "scroll-tech/bridge/internal/types"
    bridgeUtils "scroll-tech/bridge/internal/utils"
)

// TODO: Add unit tests that the limits are enforced correctly.
func testBatchProposer(t *testing.T) {
func testBatchProposerProposeBatch(t *testing.T) {
    db := setupDB(t)
    defer database.CloseDB(db)
    defer bridgeUtils.CloseDB(db)

    l2BlockOrm := orm.NewL2Block(db)
    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
    assert.NoError(t, err)
    p := &BatchProposer{
        batchGasThreshold:       1000,
        batchTxNumThreshold:     10,
        batchTimeSec:            300,
        commitCalldataSizeLimit: 500,
    }

    cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
        MaxTxGasPerChunk:                1000000000,
        MaxL2TxNumPerChunk:              10000,
        MaxL1CommitGasPerChunk:          50000000000,
        MaxL1CommitCalldataSizePerChunk: 1000000,
        MinL1CommitCalldataSizePerChunk: 0,
        ChunkTimeoutSec:                 300,
    }, db)
    cp.TryProposeChunk()
    var blockTrace *orm.BlockTrace
    patchGuard := gomonkey.ApplyMethodFunc(blockTrace, "GetL2WrappedBlocks", func(fields map[string]interface{}) ([]*bridgeTypes.WrappedBlock, error) {
        hash, _ := fields["hash"].(string)
        if hash == "blockWithLongData" {
            longData := strings.Repeat("0", 1000)
            return []*bridgeTypes.WrappedBlock{{
                Transactions: []*gethTypes.TransactionData{{
                    Data: longData,
                }},
            }}, nil
        }
        return []*bridgeTypes.WrappedBlock{{
            Transactions: []*gethTypes.TransactionData{{
                Data: "short",
            }},
        }}, nil
    })
    defer patchGuard.Reset()
    patchGuard.ApplyPrivateMethod(p, "createBatchForBlocks", func(*BatchProposer, []*types.BlockInfo) error {
        return nil
    })

    bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        MaxChunkNumPerBatch:             10,
        MaxL1CommitGasPerBatch:          50000000000,
        MaxL1CommitCalldataSizePerBatch: 1000000,
        MinChunkNumPerBatch:             1,
        BatchTimeoutSec:                 300,
    }, db)
    bp.TryProposeBatch()
    block1 := orm.BlockTrace{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix()) - 200}
    block2 := orm.BlockTrace{Number: 2, GasUsed: 200, TxNum: 2, BlockTimestamp: uint64(time.Now().Unix())}
    block3 := orm.BlockTrace{Number: 3, GasUsed: 300, TxNum: 11, BlockTimestamp: uint64(time.Now().Unix())}
    block4 := orm.BlockTrace{Number: 4, GasUsed: 1001, TxNum: 3, BlockTimestamp: uint64(time.Now().Unix())}
    blockOutdated := orm.BlockTrace{Number: 1, GasUsed: 100, TxNum: 1, BlockTimestamp: uint64(time.Now().Add(-400 * time.Second).Unix())}
    blockWithLongData := orm.BlockTrace{Hash: "blockWithLongData", Number: 5, GasUsed: 500, TxNum: 1, BlockTimestamp: uint64(time.Now().Unix())}

    chunkOrm := orm.NewChunk(db)
    chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
    assert.NoError(t, err)
    assert.Empty(t, chunks)
    testCases := []struct {
        description string
        blocks      []orm.BlockTrace
        expectedRes bool
    }{
        {"Empty block list", []orm.BlockTrace{}, false},
        {"Single block exceeding gas threshold", []orm.BlockTrace{block4}, true},
        {"Single block exceeding transaction number threshold", []orm.BlockTrace{block3}, true},
        {"Multiple blocks meeting thresholds", []orm.BlockTrace{block1, block2, block3}, true},
        {"Multiple blocks not meeting thresholds", []orm.BlockTrace{block1, block2}, false},
        {"Outdated and valid block", []orm.BlockTrace{blockOutdated, block2}, true},
        {"Single block with long data", []orm.BlockTrace{blockWithLongData}, true},
    }

    batchOrm := orm.NewBatch(db)
    // get all batches.
    batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
    assert.NoError(t, err)
    assert.Len(t, batches, 1)
    assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
    assert.Equal(t, uint64(0), batches[0].EndChunkIndex)
    assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
    assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

    dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
    assert.NoError(t, err)
    assert.Len(t, batches, 1)
    assert.Equal(t, batches[0].Hash, dbChunks[0].BatchHash)
    assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(dbChunks[0].ProvingStatus))

    blockOrm := orm.NewL2Block(db)
    blocks, err := blockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
    assert.NoError(t, err)
    assert.Len(t, blocks, 2)
    assert.Equal(t, dbChunks[0].Hash, blocks[0].ChunkHash)
    assert.Equal(t, dbChunks[0].Hash, blocks[1].ChunkHash)
    for _, tc := range testCases {
        t.Run(tc.description, func(t *testing.T) {
            assert.Equal(t, tc.expectedRes, p.proposeBatch(tc.blocks), "Failed on: %s", tc.description)
        })
    }
}

func testBatchProposerBatchGeneration(t *testing.T) {
    db := setupDB(t)
    subCtx, cancel := context.WithCancel(context.Background())
    defer func() {
        bridgeUtils.CloseDB(db)
        cancel()
    }()
    blockTraceOrm := orm.NewBlockTrace(db)
    // Insert traces into db.
    assert.NoError(t, blockTraceOrm.InsertWrappedBlocks([]*bridgeTypes.WrappedBlock{wrappedBlock1}))

    l2cfg := cfg.L2Config
    wc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
    loopToFetchEvent(subCtx, wc)

    blockBatchOrm := orm.NewBlockBatch(db)
    batch, err := blockBatchOrm.GetLatestBatch()
    assert.NoError(t, err)

    // Create a new batch.
    batchData := bridgeTypes.NewBatchData(&bridgeTypes.BatchInfo{
        Index:     0,
        Hash:      batch.Hash,
        StateRoot: batch.StateRoot,
    }, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)

    relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)

    proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        ProofGenerationFreq: 1,
        BatchGasThreshold:   3000000,
        BatchTxNumThreshold: 135,
        BatchTimeSec:        1,
        BatchBlocksLimit:    100,
    }, relayer, db)
    proposer.TryProposeBatch()

    infos, err := blockTraceOrm.GetUnbatchedL2Blocks(map[string]interface{}{}, []string{"number ASC"}, 100)
    assert.NoError(t, err)
    assert.Equal(t, 0, len(infos))

    batches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchData.Hash().Hex()}, nil, 1)
    assert.NoError(t, err)
    assert.Equal(t, 1, len(batches))
}

func testBatchProposerGracefulRestart(t *testing.T) {
    db := setupDB(t)
    defer bridgeUtils.CloseDB(db)

    relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig)
    assert.NoError(t, err)

    blockTraceOrm := orm.NewBlockTrace(db)
    // Insert traces into db.
    assert.NoError(t, blockTraceOrm.InsertWrappedBlocks([]*bridgeTypes.WrappedBlock{wrappedBlock2}))

    // Insert block batch into db.
    parentBatch1 := &bridgeTypes.BatchInfo{
        Index:     0,
        Hash:      common.Hash{}.String(),
        StateRoot: common.Hash{}.String(),
    }
    batchData1 := bridgeTypes.NewBatchData(parentBatch1, []*bridgeTypes.WrappedBlock{wrappedBlock1}, nil)

    parentBatch2 := &bridgeTypes.BatchInfo{
        Index:     batchData1.Batch.BatchIndex,
        Hash:      batchData1.Hash().Hex(),
        StateRoot: batchData1.Batch.NewStateRoot.String(),
    }
    batchData2 := bridgeTypes.NewBatchData(parentBatch2, []*bridgeTypes.WrappedBlock{wrappedBlock2}, nil)

    blockBatchOrm := orm.NewBlockBatch(db)
    err = db.Transaction(func(tx *gorm.DB) error {
        _, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData1)
        if dbTxErr != nil {
            return dbTxErr
        }
        _, dbTxErr = blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData2)
        if dbTxErr != nil {
            return dbTxErr
        }
        numbers1 := []uint64{batchData1.Batch.Blocks[0].BlockNumber}
        hash1 := batchData1.Hash().Hex()
        dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, numbers1, hash1)
        if dbTxErr != nil {
            return dbTxErr
        }
        numbers2 := []uint64{batchData2.Batch.Blocks[0].BlockNumber}
        hash2 := batchData2.Hash().Hex()
        dbTxErr = blockTraceOrm.UpdateBatchHashForL2Blocks(tx, numbers2, hash2)
        if dbTxErr != nil {
            return dbTxErr
        }
        return nil
    })
    assert.NoError(t, err)
    err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchData1.Hash().Hex(), types.RollupFinalized)
    assert.NoError(t, err)
    batchHashes, err := blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32)
    assert.NoError(t, err)
    assert.Equal(t, 1, len(batchHashes))
    assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
    // test p.recoverBatchDataBuffer().
    _ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        ProofGenerationFreq: 1,
        BatchGasThreshold:   3000000,
        BatchTxNumThreshold: 135,
        BatchTimeSec:        1,
        BatchBlocksLimit:    100,
    }, relayer, db)

    batchHashes, err = blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32)
    assert.NoError(t, err)
    assert.Equal(t, 0, len(batchHashes))

    batches, err := blockBatchOrm.GetBlockBatches(map[string]interface{}{"hash": batchData2.Hash().Hex()}, nil, 1)
    assert.NoError(t, err)
    assert.Equal(t, 1, len(batches))
}

@@ -1,169 +0,0 @@
package watcher

import (
    "context"
    "fmt"
    "time"

    "github.com/scroll-tech/go-ethereum/log"
    "gorm.io/gorm"

    "scroll-tech/common/types"

    "scroll-tech/bridge/internal/config"
    "scroll-tech/bridge/internal/orm"
)

// ChunkProposer proposes chunks based on available unchunked blocks.
type ChunkProposer struct {
    ctx context.Context
    db  *gorm.DB

    chunkOrm   *orm.Chunk
    l2BlockOrm *orm.L2Block

    maxTxGasPerChunk                uint64
    maxL2TxNumPerChunk              uint64
    maxL1CommitGasPerChunk          uint64
    maxL1CommitCalldataSizePerChunk uint64
    minL1CommitCalldataSizePerChunk uint64
    chunkTimeoutSec                 uint64
}

// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
    return &ChunkProposer{
        ctx:                             ctx,
        db:                              db,
        chunkOrm:                        orm.NewChunk(db),
        l2BlockOrm:                      orm.NewL2Block(db),
        maxTxGasPerChunk:                cfg.MaxTxGasPerChunk,
        maxL2TxNumPerChunk:              cfg.MaxL2TxNumPerChunk,
        maxL1CommitGasPerChunk:          cfg.MaxL1CommitGasPerChunk,
        maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
        minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
        chunkTimeoutSec:                 cfg.ChunkTimeoutSec,
    }
}

// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
    proposedChunk, err := p.proposeChunk()
    if err != nil {
        log.Error("propose new chunk failed", "err", err)
        return
    }

    if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
        log.Error("update chunk info in orm failed", "err", err)
    }
}

func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
    if chunk == nil {
        return nil
    }

    err := p.db.Transaction(func(dbTX *gorm.DB) error {
        dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
        if err != nil {
            return err
        }
        if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
            log.Error("failed to update chunk_hash for l2_blocks", "chunk hash", chunk.Hash, "start block", 0, "end block", 0, "err", err)
            return err
        }
        return nil
    })
    return err
}

func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
    blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
    if err != nil {
        return nil, err
    }

    if len(blocks) == 0 {
        return nil, nil
    }

    firstBlock := blocks[0]
    totalTxGasUsed := firstBlock.Header.GasUsed
    totalL2TxNum := firstBlock.L2TxsNum()
    totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
    totalL1CommitGas := firstBlock.EstimateL1CommitGas()

    // Check if the first block breaks hard limits.
    // If so, it indicates there are bugs in sequencer, manual fix is needed.
    if totalL2TxNum > p.maxL2TxNumPerChunk {
        return nil, fmt.Errorf(
            "the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
            firstBlock.Header.Number,
            totalL2TxNum,
            p.maxL2TxNumPerChunk,
        )
    }

    if totalL1CommitGas > p.maxL1CommitGasPerChunk {
        return nil, fmt.Errorf(
            "the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
            firstBlock.Header.Number,
            totalL1CommitGas,
            p.maxL1CommitGasPerChunk,
        )
    }

    if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
        return nil, fmt.Errorf(
            "the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
            firstBlock.Header.Number,
            totalL1CommitCalldataSize,
            p.maxL1CommitCalldataSizePerChunk,
        )
    }

    // Check if the first block breaks any soft limits.
    if totalTxGasUsed > p.maxTxGasPerChunk {
        log.Warn(
            "The first block in chunk exceeds l2 tx gas limit",
            "block number", firstBlock.Header.Number,
            "gas used", totalTxGasUsed,
            "max gas limit", p.maxTxGasPerChunk,
        )
    }

    for i, block := range blocks[1:] {
        totalTxGasUsed += block.Header.GasUsed
        totalL2TxNum += block.L2TxsNum()
        totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
        totalL1CommitGas += block.EstimateL1CommitGas()
        if totalTxGasUsed > p.maxTxGasPerChunk ||
            totalL2TxNum > p.maxL2TxNumPerChunk ||
            totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
            totalL1CommitGas > p.maxL1CommitGasPerChunk {
            blocks = blocks[:i+1]
            break
        }
    }

    var hasBlockTimeout bool
    currentTimeSec := uint64(time.Now().Unix())
    if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
        log.Warn("first block timeout",
            "block number", blocks[0].Header.Number,
            "block timestamp", blocks[0].Header.Time,
            "block outdated time threshold", currentTimeSec,
        )
        hasBlockTimeout = true
    }

    if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
        log.Warn("The calldata size of the chunk is less than the minimum limit",
            "totalL1CommitCalldataSize", totalL1CommitCalldataSize,
            "minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
        )
        return nil, nil
    }
    return &types.Chunk{Blocks: blocks}, nil
}
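// Note the truncation index in the loop above: block is blocks[i+1], so
// blocks[:i+1] keeps only the blocks before the one that crossed a limit and
// the offending block waits for the next chunk. The running totals, however,
// still include its contribution when the minimum-calldata check runs.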
@@ -1,46 +0,0 @@
package watcher

import (
    "context"
    "testing"

    "github.com/stretchr/testify/assert"

    "scroll-tech/common/database"
    "scroll-tech/common/types"

    "scroll-tech/bridge/internal/config"
    "scroll-tech/bridge/internal/orm"
)

// TODO: Add unit tests that the limits are enforced correctly.
func testChunkProposer(t *testing.T) {
    db := setupDB(t)
    defer database.CloseDB(db)

    l2BlockOrm := orm.NewL2Block(db)
    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
    assert.NoError(t, err)

    cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
        MaxTxGasPerChunk:                1000000000,
        MaxL2TxNumPerChunk:              10000,
        MaxL1CommitGasPerChunk:          50000000000,
        MaxL1CommitCalldataSizePerChunk: 1000000,
        MinL1CommitCalldataSizePerChunk: 0,
        ChunkTimeoutSec:                 300,
    }, db)
    cp.TryProposeChunk()

    expectedChunk := &types.Chunk{
        Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
    }
    expectedHash, err := expectedChunk.Hash(0)
    assert.NoError(t, err)

    chunkOrm := orm.NewChunk(db)
    chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
    assert.NoError(t, err)
    assert.Len(t, chunks, 1)
    assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
}
@@ -24,9 +24,10 @@ import (
)

var (
    bridgeL1MsgsSyncHeightGauge          = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
    bridgeL1MsgsSentEventsTotalCounter   = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
    bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
    bridgeL1MsgsSyncHeightGauge           = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
    bridgeL1MsgsSentEventsTotalCounter    = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
    bridgeL1MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/events/total", metrics.ScrollRegistry)
    bridgeL1MsgsRollupEventsTotalCounter  = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)

type rollupEvent struct {
@@ -40,8 +41,9 @@ type L1WatcherClient struct {
    ctx          context.Context
    client       *ethclient.Client
    l1MessageOrm *orm.L1Message
    l2MessageOrm *orm.L2Message
    l1BlockOrm   *orm.L1Block
    batchOrm     *orm.Batch
    l1BatchOrm   *orm.BlockBatch

    // The number of new blocks to wait for a block to be confirmed
    confirmations rpc.BlockNumber
@@ -74,7 +76,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
    }

    l1BlockOrm := orm.NewL1Block(db)
    savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(ctx)
    savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
    if err != nil {
        log.Warn("Failed to fetch latest L1 block height from db", "err", err)
        savedL1BlockHeight = 0
@@ -88,7 +90,8 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
        client:       client,
        l1MessageOrm: l1MessageOrm,
        l1BlockOrm:   l1BlockOrm,
        batchOrm:     orm.NewBatch(db),
        l1BatchOrm:   orm.NewBlockBatch(db),
        l2MessageOrm: orm.NewL2Message(db),
        confirmations: confirmations,

        messengerAddress: messengerAddress,
@@ -149,10 +152,9 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
        baseFee = block.BaseFee.Uint64()
    }
    blocks = append(blocks, orm.L1Block{
        Number:          uint64(height),
        Hash:            block.Hash().String(),
        BaseFee:         baseFee,
        GasOracleStatus: int16(types.GasOraclePending),
        Number:  uint64(height),
        Hash:    block.Hash().String(),
        BaseFee: baseFee,
    })
}

@@ -225,23 +227,25 @@ func (w *L1WatcherClient) FetchContractEvent() error {
    }
    log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))

    sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
    sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
    if err != nil {
        log.Error("Failed to parse emitted events log", "err", err)
        return err
    }
    sentMessageCount := int64(len(sentMessageEvents))
    relayedMessageCount := int64(len(relayedMessageEvents))
    rollupEventCount := int64(len(rollupEvents))
    bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
    bridgeL1MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
    bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
    log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)
    log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RelayedMessageCount", relayedMessageCount, "RollupEventCount", rollupEventCount)

    // use rollup event to update rollup results db status
    var batchHashes []string
    for _, event := range rollupEvents {
        batchHashes = append(batchHashes, event.batchHash.String())
    }
    statuses, err := w.batchOrm.GetRollupStatusByHashList(w.ctx, batchHashes)
    statuses, err := w.l1BatchOrm.GetRollupStatusByHashList(batchHashes)
    if err != nil {
        log.Error("Failed to GetRollupStatusByHashList", "err", err)
        return err
@@ -257,9 +261,9 @@ func (w *L1WatcherClient) FetchContractEvent() error {
    // only update when db status is before event status
    if event.status > status {
        if event.status == types.RollupFinalized {
            err = w.batchOrm.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
            err = w.l1BatchOrm.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
        } else if event.status == types.RollupCommitted {
            err = w.batchOrm.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
            err = w.l1BatchOrm.UpdateCommitTxHashAndRollupStatus(w.ctx, batchHash, event.txHash.String(), event.status)
        }
        if err != nil {
            log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
@@ -268,6 +272,21 @@ func (w *L1WatcherClient) FetchContractEvent() error {
        }
    }

    // Update relayed messages first so we don't forget to update a submitted
    // message, since we always start syncing from the latest unprocessed message.
    for _, msg := range relayedMessageEvents {
        var msgStatus types.MsgStatus
        if msg.isSuccessful {
            msgStatus = types.MsgConfirmed
        } else {
            msgStatus = types.MsgFailed
        }
        if err = w.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), msgStatus, msg.txHash.String()); err != nil {
            log.Error("Failed to update layer1 status and layer2 hash", "err", err)
            return err
        }
    }

    if err = w.l1MessageOrm.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
        return err
    }
@@ -279,10 +298,11 @@ func (w *L1WatcherClient) FetchContractEvent() error {
    return nil
}

func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []rollupEvent, error) {
func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
    // Need to use the contract ABI to parse the event log.
    // Can only be tested after we have our contracts set up.
    var l1Messages []*orm.L1Message
    var relayedMessages []relayedMessage
    var rollupEvents []rollupEvent
    for _, vLog := range logs {
        switch vLog.Topics[0] {
@@ -291,13 +311,13 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
            err := utils.UnpackLog(w.messageQueueABI, &event, "QueueTransaction", vLog)
            if err != nil {
                log.Warn("Failed to unpack layer1 QueueTransaction event", "err", err)
                return l1Messages, rollupEvents, err
                return l1Messages, relayedMessages, rollupEvents, err
            }

            msgHash := common.BytesToHash(crypto.Keccak256(event.Data))

            l1Messages = append(l1Messages, &orm.L1Message{
                QueueIndex: event.QueueIndex,
                QueueIndex: event.QueueIndex.Uint64(),
                MsgHash:    msgHash.String(),
                Height:     vLog.BlockNumber,
                Sender:     event.Sender.String(),
@@ -307,12 +327,38 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
                GasLimit:   event.GasLimit.Uint64(),
                Layer1Hash: vLog.TxHash.Hex(),
            })
        case bridgeAbi.L1RelayedMessageEventSignature:
            event := bridgeAbi.L1RelayedMessageEvent{}
            err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
            if err != nil {
                log.Warn("Failed to unpack layer1 RelayedMessage event", "err", err)
                return l1Messages, relayedMessages, rollupEvents, err
            }

            relayedMessages = append(relayedMessages, relayedMessage{
                msgHash:      event.MessageHash,
                txHash:       vLog.TxHash,
                isSuccessful: true,
            })
        case bridgeAbi.L1FailedRelayedMessageEventSignature:
            event := bridgeAbi.L1FailedRelayedMessageEvent{}
            err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
            if err != nil {
                log.Warn("Failed to unpack layer1 FailedRelayedMessage event", "err", err)
                return l1Messages, relayedMessages, rollupEvents, err
            }

            relayedMessages = append(relayedMessages, relayedMessage{
                msgHash:      event.MessageHash,
                txHash:       vLog.TxHash,
                isSuccessful: false,
            })
        case bridgeAbi.L1CommitBatchEventSignature:
            event := bridgeAbi.L1CommitBatchEvent{}
            err := utils.UnpackLog(w.scrollChainABI, &event, "CommitBatch", vLog)
            if err != nil {
                log.Warn("Failed to unpack layer1 CommitBatch event", "err", err)
                return l1Messages, rollupEvents, err
                return l1Messages, relayedMessages, rollupEvents, err
            }

            rollupEvents = append(rollupEvents, rollupEvent{
@@ -325,7 +371,7 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
            err := utils.UnpackLog(w.scrollChainABI, &event, "FinalizeBatch", vLog)
            if err != nil {
                log.Warn("Failed to unpack layer1 FinalizeBatch event", "err", err)
                return l1Messages, rollupEvents, err
                return l1Messages, relayedMessages, rollupEvents, err
            }

            rollupEvents = append(rollupEvents, rollupEvent{
@@ -338,5 +384,5 @@ func (w *L1WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]*orm.L1M
        }
    }

    return l1Messages, rollupEvents, nil
    return l1Messages, relayedMessages, rollupEvents, nil
}
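// Dispatch summary: QueueTransaction logs become orm.L1Message rows,
// RelayedMessage and FailedRelayedMessage become relayedMessage entries with
// isSuccessful set accordingly, and CommitBatch / FinalizeBatch become
// rollupEvent entries; FetchContractEvent then persists each slice as shown
// earlier in this file.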
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
commonTypes "scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -37,13 +36,13 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
|
||||
|
||||
func testFetchContractEvent(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer database.CloseDB(db)
|
||||
defer utils.CloseDB(db)
|
||||
assert.NoError(t, watcher.FetchContractEvent())
|
||||
}
|
||||
|
||||
func testL1WatcherClientFetchBlockHeader(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer database.CloseDB(db)
|
||||
defer utils.CloseDB(db)
|
||||
convey.Convey("test toBlock < fromBlock", t, func() {
|
||||
var blockHeight uint64
|
||||
if watcher.ProcessedBlockHeight() <= 0 {
|
||||
@@ -115,7 +114,7 @@ func testL1WatcherClientFetchBlockHeader(t *testing.T) {
|
||||
|
||||
func testL1WatcherClientFetchContractEvent(t *testing.T) {
|
||||
watcher, db := setupL1Watcher(t)
|
||||
defer database.CloseDB(db)
|
||||
defer utils.CloseDB(db)
|
||||
|
||||
watcher.SetConfirmations(rpc.SafeBlockNumber)
|
||||
convey.Convey("get latest confirmed block number failure", t, func() {
|
||||
@@ -160,14 +159,14 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
|
||||
|
||||
convey.Convey("parse bridge event logs failure", t, func() {
|
||||
targetErr := errors.New("parse log failure")
|
||||
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []rollupEvent, error) {
|
||||
return nil, nil, targetErr
|
||||
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
|
||||
return nil, nil, nil, targetErr
|
||||
})
|
||||
err := watcher.FetchContractEvent()
|
||||
assert.EqualError(t, err, targetErr.Error())
|
||||
assert.Equal(t, err.Error(), targetErr.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []rollupEvent, error) {
|
||||
patchGuard.ApplyPrivateMethod(watcher, "parseBridgeEventLogs", func(*L1WatcherClient, []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
|
||||
rollupEvents := []rollupEvent{
|
||||
{
|
||||
batchHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
@@ -180,13 +179,26 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
|
||||
status: commonTypes.RollupCommitted,
|
||||
},
|
||||
}
|
||||
return nil, rollupEvents, nil
|
||||
|
||||
relayedMessageEvents := []relayedMessage{
|
||||
{
|
||||
msgHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
|
||||
txHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
|
||||
isSuccessful: true,
|
||||
},
|
||||
{
|
||||
msgHash: common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"),
|
||||
txHash: common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"),
|
||||
isSuccessful: false,
|
||||
},
|
||||
}
|
||||
return nil, relayedMessageEvents, rollupEvents, nil
|
||||
})

var batchOrm *orm.Batch
var blockBatchOrm *orm.BlockBatch
convey.Convey("db get rollup status by hash list failure", t, func() {
targetErr := errors.New("get db failure")
patchGuard.ApplyMethodFunc(batchOrm, "GetRollupStatusByHashList", func(context.Context, []string) ([]commonTypes.RollupStatus, error) {
patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
return nil, targetErr
})
err := watcher.FetchContractEvent()
@@ -194,7 +206,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
})

convey.Convey("rollup status mismatch batch hashes length", t, func() {
patchGuard.ApplyMethodFunc(batchOrm, "GetRollupStatusByHashList", func(context.Context, []string) ([]commonTypes.RollupStatus, error) {
patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
s := []commonTypes.RollupStatus{
commonTypes.RollupFinalized,
}
@@ -204,7 +216,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {
assert.NoError(t, err)
})

patchGuard.ApplyMethodFunc(batchOrm, "GetRollupStatusByHashList", func(context.Context, []string) ([]commonTypes.RollupStatus, error) {
patchGuard.ApplyMethodFunc(blockBatchOrm, "GetRollupStatusByHashList", func(hashes []string) ([]commonTypes.RollupStatus, error) {
s := []commonTypes.RollupStatus{
commonTypes.RollupPending,
commonTypes.RollupCommitting,
@@ -214,27 +226,41 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {

convey.Convey("db update RollupFinalized status failure", t, func() {
targetErr := errors.New("UpdateFinalizeTxHashAndRollupStatus RollupFinalized failure")
patchGuard.ApplyMethodFunc(batchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})

patchGuard.ApplyMethodFunc(batchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateFinalizeTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return nil
})

convey.Convey("db update RollupCommitted status failure", t, func() {
targetErr := errors.New("UpdateCommitTxHashAndRollupStatus RollupCommitted failure")
patchGuard.ApplyMethodFunc(batchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})

patchGuard.ApplyMethodFunc(batchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
patchGuard.ApplyMethodFunc(blockBatchOrm, "UpdateCommitTxHashAndRollupStatus", func(context.Context, string, string, commonTypes.RollupStatus) error {
return nil
})

var l2MessageOrm *orm.L2Message
convey.Convey("db update layer2 status and layer1 hash failure", t, func() {
targetErr := errors.New("UpdateLayer2StatusAndLayer1Hash failure")
patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return targetErr
})
err := watcher.FetchContractEvent()
assert.Equal(t, targetErr.Error(), err.Error())
})

patchGuard.ApplyMethodFunc(l2MessageOrm, "UpdateLayer2StatusAndLayer1Hash", func(context.Context, string, commonTypes.MsgStatus, string) error {
return nil
})

@@ -260,7 +286,7 @@ func testL1WatcherClientFetchContractEvent(t *testing.T) {

func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer database.CloseDB(db)
defer utils.CloseDB(db)

logs := []types.Log{
{
@@ -277,16 +303,17 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

convey.Convey("L1QueueTransactionEventSignature success", t, func() {
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridgeAbi.L1QueueTransactionEvent)
tmpOut.QueueIndex = 100
tmpOut.QueueIndex = big.NewInt(100)
tmpOut.Data = []byte("test data")
tmpOut.Sender = common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
tmpOut.Value = big.NewInt(1000)
@@ -296,17 +323,105 @@ func testParseBridgeEventLogsL1QueueTransactionEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
assert.Len(t, l2Messages, 1)
assert.Equal(t, l2Messages[0].Value, big.NewInt(1000).String())
})
}
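All of these cases patch utils.UnpackLog, the helper that decodes a raw log into a typed event struct. The repo's implementation is not shown in this diff; a plausible minimal sketch on top of go-ethereum's abi package (non-indexed fields from log.Data, indexed fields from the topics) could look like this:

```go
package utilsketch

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/core/types"
)

// unpackLog is a sketch of an UnpackLog-style helper; the real
// scroll-tech/bridge/internal/utils version may differ.
func unpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
	if len(log.Topics) == 0 || log.Topics[0] != c.Events[event].ID {
		return fmt.Errorf("event signature mismatch for %q", event)
	}
	if len(log.Data) > 0 {
		// Non-indexed event fields live in the data payload.
		if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
			return err
		}
	}
	// Indexed fields are carried as topics after the signature hash.
	var indexed abi.Arguments
	for _, arg := range c.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopics(out, indexed, log.Topics[1:])
}
```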

func testParseBridgeEventLogsL1RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db)

logs := []types.Log{
{
Topics: []common.Hash{bridgeAbi.L1RelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}

convey.Convey("unpack RelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog RelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

convey.Convey("L1RelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridgeAbi.L1RelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, rollupEvents)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}

func testParseBridgeEventLogsL1FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer utils.CloseDB(db)
logs := []types.Log{
{
Topics: []common.Hash{bridgeAbi.L1FailedRelayedMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}

convey.Convey("unpack FailedRelayedMessage log failure", t, func() {
targetErr := errors.New("UnpackLog FailedRelayedMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
return targetErr
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

convey.Convey("L1FailedRelayedMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log types.Log) error {
tmpOut := out.(*bridgeAbi.L1FailedRelayedMessageEvent)
tmpOut.MessageHash = msgHash
return nil
})
defer patchGuard.Reset()

l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, rollupEvents)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}

func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer database.CloseDB(db)
defer utils.CloseDB(db)
logs := []types.Log{
{
Topics: []common.Hash{bridgeAbi.L1CommitBatchEventSignature},
@@ -322,9 +437,10 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

@@ -337,9 +453,10 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Len(t, rollupEvents, 1)
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupCommitted)
@@ -348,7 +465,7 @@ func testParseBridgeEventLogsL1CommitBatchEventSignature(t *testing.T) {

func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
watcher, db := setupL1Watcher(t)
defer database.CloseDB(db)
defer utils.CloseDB(db)
logs := []types.Log{
{
Topics: []common.Hash{bridgeAbi.L1FinalizeBatchEventSignature},
@@ -364,9 +481,10 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Empty(t, rollupEvents)
})

@@ -379,9 +497,10 @@ func testParseBridgeEventLogsL1FinalizeBatchEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

l2Messages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, rollupEvents, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
assert.Len(t, rollupEvents, 1)
assert.Equal(t, rollupEvents[0].batchHash, msgHash)
assert.Equal(t, rollupEvents[0].status, commonTypes.RollupFinalized)

@@ -2,6 +2,7 @@ package watcher

import (
"context"
"errors"
"fmt"
"math/big"

@@ -22,6 +23,7 @@ import (

bridgeAbi "scroll-tech/bridge/abi"
"scroll-tech/bridge/internal/orm"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)

@@ -30,6 +32,8 @@ var (
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL2MsgsAppendEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/append/events/total", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
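The new counters follow go-ethereum's metrics API, which the gauges above already use: register once at package scope, then Inc/Update on the hot path. A small sketch with a local registry standing in for metrics.ScrollRegistry:

```go
package main

import (
	"fmt"

	gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)

func main() {
	registry := gethMetrics.NewRegistry() // stand-in for metrics.ScrollRegistry

	sent := gethMetrics.NewRegisteredCounter("bridge/l2/msgs/sent/events/total", registry)
	height := gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", registry)

	sent.Inc(3)         // three SentMessage events seen in this fetch window
	height.Update(1024) // latest fully processed L2 block height

	fmt.Println(sent.Count(), height.Value()) // 3 1024
}
```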

@@ -40,8 +44,11 @@ type L2WatcherClient struct {

*ethclient.Client

l2BlockOrm *orm.L2Block
l1MessageOrm *orm.L1Message
db *gorm.DB
blockBatchOrm *orm.BlockBatch
blockTraceOrm *orm.BlockTrace
l1MessageOrm *orm.L1Message
l2MessageOrm *orm.L2Message

confirmations rpc.BlockNumber

@@ -60,29 +67,23 @@ type L2WatcherClient struct {

// NewL2WatcherClient takes an l2geth instance and generates an L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
var savedHeight uint64
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
if err != nil || l1msg == nil {
l2MessageOrm := orm.NewL2Message(db)
savedHeight, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
} else {
receipt, err := client.TransactionReceipt(ctx, common.HexToHash(l1msg.Layer2Hash))
if err != nil || receipt == nil {
log.Warn("get tx from l2 failed", "err", err)
savedHeight = 0
} else {
savedHeight = receipt.BlockNumber.Uint64()
}
}

w := L2WatcherClient{
ctx: ctx,
db: db,
Client: client,

l2BlockOrm: orm.NewL2Block(db),
blockBatchOrm: orm.NewBlockBatch(db),
blockTraceOrm: orm.NewBlockTrace(db),
l1MessageOrm: orm.NewL1Message(db),
processedMsgHeight: savedHeight,
l2MessageOrm: l2MessageOrm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,

messengerAddress: messengerAddress,
@@ -95,28 +96,84 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
stopped: 0,
}

// Initialize genesis before we do anything else
if err := w.initializeGenesis(); err != nil {
panic(fmt.Sprintf("failed to initialize L2 genesis batch, err: %v", err))
}

return &w
}

func (w *L2WatcherClient) initializeGenesis() error {
if count, err := w.blockBatchOrm.GetBatchCount(); err != nil {
return fmt.Errorf("failed to get batch count: %v", err)
} else if count > 0 {
log.Info("genesis already imported")
return nil
}

genesis, err := w.HeaderByNumber(w.ctx, big.NewInt(0))
if err != nil {
return fmt.Errorf("failed to retrieve L2 genesis header: %v", err)
}

log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())

blockTrace := &bridgeTypes.WrappedBlock{
Header: genesis,
Transactions: nil,
WithdrawTrieRoot: common.Hash{},
}
batchData := bridgeTypes.NewGenesisBatchData(blockTrace)

if err = orm.AddBatchInfoToDB(w.db, batchData); err != nil {
log.Error("failed to add batch info to DB", "BatchHash", batchData.Hash(), "error", err)
return err
}

batchHash := batchData.Hash().Hex()

if err = w.blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskProved); err != nil {
return fmt.Errorf("failed to update genesis batch proving status: %v", err)
}

if err = w.blockBatchOrm.UpdateRollupStatus(w.ctx, batchHash, types.RollupFinalized); err != nil {
return fmt.Errorf("failed to update genesis batch rollup status: %v", err)
}

log.Info("successfully imported genesis batch")

return nil
}

const blockTracesFetchLimit = uint64(10)

// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
// TryFetchRunningMissingBlocks tries to fetch missing blocks if the DB is inconsistent with the chain
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// Get the newest block in the DB; blocks must exist there by this point.
// Don't use the "block_trace" table's "trace" column (BlockTrace.Number),
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.blockTraceOrm.GetL2BlocksLatestHeight()
if err != nil {
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
return
}

// Fetch and store block traces for missing blocks
for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit {
// Can't get a trace from the genesis block, so the default start number is 1.
var from = uint64(1)
if heightInDB > 0 {
from = uint64(heightInDB) + 1
}

for ; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1

if to > blockHeight {
to = blockHeight
}

if err = w.getAndStoreBlockTraces(w.ctx, from, to); err != nil {
// Get block traces and insert them into the db.
if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
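Both sides of this hunk walk the missing range in fixed windows of blockTracesFetchLimit blocks, clamping the final window. A standalone sketch of the windowing, handy for checking that the bounds are inclusive:

```go
package main

import "fmt"

func main() {
	const limit = uint64(10) // mirrors blockTracesFetchLimit
	heightInDB, blockHeight := uint64(7), uint64(33)

	for from := heightInDB + 1; from <= blockHeight; from += limit {
		to := from + limit - 1
		if to > blockHeight {
			to = blockHeight // clamp the last, possibly partial, window
		}
		fmt.Printf("fetch blocks [%d, %d]\n", from, to)
	}
	// Output: [8, 17] [18, 27] [28, 33]
}
```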
@@ -129,20 +186,10 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
txsData := make([]*gethTypes.TransactionData, len(txs))
for i, tx := range txs {
v, r, s := tx.RawSignatureValues()

nonce := tx.Nonce()

// We need QueueIndex in `NewBatchHeader`. However, `TransactionData`
// does not have this field. Since `L1MessageTx` does not have a nonce,
// we reuse this field for storing the queue index.
if msg := tx.AsL1MessageTx(); msg != nil {
nonce = msg.QueueIndex
}

txsData[i] = &gethTypes.TransactionData{
Type: tx.Type(),
TxHash: tx.Hash().String(),
Nonce: nonce,
Nonce: tx.Nonce(),
ChainId: (*hexutil.Big)(tx.ChainId()),
Gas: tx.Gas(),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
@@ -159,7 +206,7 @@ func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
}
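One side of this hunk reuses the Nonce field of TransactionData to carry the L1 message queue index, since TransactionData has no QueueIndex field and an L1MessageTx has no meaningful nonce. A toy sketch of that convention with hypothetical stand-in types:

```go
package main

import "fmt"

type l1MessageTx struct{ QueueIndex uint64 }

type tx struct {
	nonce uint64
	l1Msg *l1MessageTx // non-nil only for L1 message transactions
}

// effectiveNonce mirrors the idea above: for L1 message transactions the
// nonce slot carries the queue index instead.
func effectiveNonce(t tx) uint64 {
	if t.l1Msg != nil {
		return t.l1Msg.QueueIndex
	}
	return t.nonce
}

func main() {
	fmt.Println(effectiveNonce(tx{nonce: 5}))                           // 5
	fmt.Println(effectiveNonce(tx{l1Msg: &l1MessageTx{QueueIndex: 9}})) // 9
}
```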

func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var blocks []*types.WrappedBlock
var blocks []*bridgeTypes.WrappedBlock
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err2 := w.BlockByNumber(ctx, big.NewInt(int64(number)))
@@ -174,7 +221,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
return fmt.Errorf("failed to get withdrawTrieRoot: %v. number: %v", err3, number)
}

blocks = append(blocks, &types.WrappedBlock{
blocks = append(blocks, &bridgeTypes.WrappedBlock{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
WithdrawTrieRoot: common.BytesToHash(withdrawTrieRoot),
@@ -182,7 +229,7 @@ func (w *L2WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to u
}

if len(blocks) > 0 {
if err := w.l2BlockOrm.InsertL2Blocks(w.ctx, blocks); err != nil {
if err := w.blockTraceOrm.InsertWrappedBlocks(blocks); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
}
@@ -240,15 +287,17 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))

relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}

sentMessageCount := int64(len(sentMessageEvents))
relayedMessageCount := int64(len(relayedMessageEvents))
bridgeL2MsgsSentEventsTotalCounter.Inc(sentMessageCount)
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
log.Info("L2 events types", "SentMessageCount", sentMessageCount, "RelayedMessageCount", relayedMessageCount)

// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
@@ -265,24 +314,71 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
}

if err = w.l2MessageOrm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err)
return
}

w.processedMsgHeight = uint64(to)
bridgeL2MsgsSyncHeightGauge.Update(to)
}
}

func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]relayedMessage, error) {
func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]orm.L2Message, []relayedMessage, error) {
// Need to use the contract ABI to parse the event logs.
// Can only be tested after we have our contracts set up.

var l2Messages []orm.L2Message
var relayedMessages []relayedMessage
var lastAppendMsgHash common.Hash
var lastAppendMsgNonce uint64
for _, vLog := range logs {
switch vLog.Topics[0] {
case bridgeAbi.L2SentMessageEventSignature:
event := bridgeAbi.L2SentMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "SentMessage", vLog)
if err != nil {
log.Error("failed to unpack layer2 SentMessage event", "err", err)
return l2Messages, relayedMessages, err
}

computedMsgHash := utils.ComputeMessageHash(
event.Sender,
event.Target,
event.Value,
event.MessageNonce,
event.Message,
)

// The `AppendMessage` event is always emitted before the `SentMessage` event,
// so they should always match; we just double-check here.
if event.MessageNonce.Uint64() != lastAppendMsgNonce {
errMsg := fmt.Sprintf("l2 message nonces mismatch: AppendMessage.nonce=%v, SentMessage.nonce=%v, tx_hash=%v",
lastAppendMsgNonce, event.MessageNonce.Uint64(), vLog.TxHash.Hex())
return l2Messages, relayedMessages, errors.New(errMsg)
}
if computedMsgHash != lastAppendMsgHash {
errMsg := fmt.Sprintf("l2 message hashes mismatch: AppendMessage.msg_hash=%v, SentMessage.msg_hash=%v, tx_hash=%v",
lastAppendMsgHash.Hex(), computedMsgHash.Hex(), vLog.TxHash.Hex())
return l2Messages, relayedMessages, errors.New(errMsg)
}

l2Messages = append(l2Messages, orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: computedMsgHash.String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case bridgeAbi.L2RelayedMessageEventSignature:
event := bridgeAbi.L2RelayedMessageEvent{}
err := utils.UnpackLog(w.messengerABI, &event, "RelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 RelayedMessage event", "err", err)
return relayedMessages, err
return l2Messages, relayedMessages, err
}

relayedMessages = append(relayedMessages, relayedMessage{
@@ -295,7 +391,7 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]relayedM
err := utils.UnpackLog(w.messengerABI, &event, "FailedRelayedMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 FailedRelayedMessage event", "err", err)
return relayedMessages, err
return l2Messages, relayedMessages, err
}

relayedMessages = append(relayedMessages, relayedMessage{
@@ -303,9 +399,21 @@ func (w *L2WatcherClient) parseBridgeEventLogs(logs []gethTypes.Log) ([]relayedM
txHash: vLog.TxHash,
isSuccessful: false,
})
case bridgeAbi.L2AppendMessageEventSignature:
event := bridgeAbi.L2AppendMessageEvent{}
err := utils.UnpackLog(w.messageQueueABI, &event, "AppendMessage", vLog)
if err != nil {
log.Warn("Failed to unpack layer2 AppendMessage event", "err", err)
return l2Messages, relayedMessages, err
}

lastAppendMsgHash = event.MessageHash
lastAppendMsgNonce = event.Index.Uint64()
bridgeL2MsgsAppendEventsTotalCounter.Inc(1)
default:
log.Error("Unknown event", "topic", vLog.Topics[0], "txHash", vLog.TxHash)
}
}

return relayedMessages, nil
return l2Messages, relayedMessages, nil
}
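parseBridgeEventLogs depends on intra-transaction event ordering: AppendMessage records the expected hash and nonce, and the matching SentMessage is cross-checked against them. A stripped-down sketch of that invariant, detached from the ABI plumbing:

```go
package main

import (
	"errors"
	"fmt"
)

type pairing struct {
	lastHash  string
	lastNonce uint64
}

// onAppendMessage records what the following SentMessage must match.
func (p *pairing) onAppendMessage(hash string, nonce uint64) {
	p.lastHash, p.lastNonce = hash, nonce
}

// onSentMessage enforces the double-check performed by the parser above.
func (p *pairing) onSentMessage(computedHash string, nonce uint64) error {
	if nonce != p.lastNonce {
		return fmt.Errorf("nonce mismatch: append=%d sent=%d", p.lastNonce, nonce)
	}
	if computedHash != p.lastHash {
		return errors.New("message hash mismatch")
	}
	return nil
}

func main() {
	var p pairing
	p.onAppendMessage("0xabc", 7)
	fmt.Println(p.onSentMessage("0xabc", 7)) // <nil>
	fmt.Println(p.onSentMessage("0xdef", 8)) // nonce mismatch: append=7 sent=8
}
```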

@@ -21,7 +21,7 @@ import (
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"

"scroll-tech/common/database"
"scroll-tech/common/types"
cutils "scroll-tech/common/utils"

bridgeAbi "scroll-tech/bridge/abi"
@@ -43,7 +43,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
subCtx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
defer database.CloseDB(db)
defer utils.CloseDB(db)
}()

loopToFetchEvent(subCtx, wc)
@@ -67,9 +67,140 @@ func testCreateNewWatcherAndStop(t *testing.T) {
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
}

func testMonitorBridgeContract(t *testing.T) {
wc, db := setupL2Watcher(t)
subCtx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
defer utils.CloseDB(db)
}()

loopToFetchEvent(subCtx, wc)

previousHeight, err := l2Cli.BlockNumber(context.Background())
assert.NoError(t, err)

auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)

rc := prepareWatcherClient(l2Cli, db, address)
loopToFetchEvent(subCtx, rc)
// Call the mock_bridge instance's sendMessage to trigger emitted events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)

tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

// extra block mined
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

l2MessageOrm := orm.NewL2Message(db)
// check if we successfully stored events
assert.True(t, cutils.TryTimes(10, func() bool {
height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
return err == nil && height > int64(previousHeight)
}))

// check l2 messages.
assert.True(t, cutils.TryTimes(10, func() bool {
msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
return err == nil && len(msgs) == 2
}))
}
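The assertions above poll the database through cutils.TryTimes rather than sleeping a fixed amount. A minimal sketch of such a retry helper (the real scroll-tech/common/utils implementation may differ in its interval and backoff):

```go
package main

import (
	"fmt"
	"time"
)

// tryTimes retries f up to n times with a short pause between attempts and
// reports whether f ever returned true.
func tryTimes(n int, f func() bool) bool {
	for i := 0; i < n; i++ {
		if f() {
			return true
		}
		time.Sleep(time.Second)
	}
	return false
}

func main() {
	calls := 0
	ok := tryTimes(10, func() bool {
		calls++
		return calls >= 3 // succeeds on the third attempt
	})
	fmt.Println(ok, calls) // true 3
}
```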

func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
_, db := setupL2Watcher(t)
subCtx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
defer utils.CloseDB(db)
}()

previousHeight, err := l2Cli.BlockNumber(context.Background()) // shadow the global previousHeight
assert.NoError(t, err)

auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)

wc := prepareWatcherClient(l2Cli, db, address)
loopToFetchEvent(subCtx, wc)

// Call the mock_bridge instance's sendMessage multiple times to trigger emitted events
numTransactions := 4
var tx *gethTypes.Transaction
for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
}

receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

// extra block mined
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nounceErr := l2Cli.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nounceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != gethTypes.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}

l2MessageOrm := orm.NewL2Message(db)
// check if we successfully stored events
assert.True(t, cutils.TryTimes(10, func() bool {
height, err := l2MessageOrm.GetLayer2LatestWatchedHeight()
return err == nil && height > int64(previousHeight)
}))

assert.True(t, cutils.TryTimes(10, func() bool {
msgs, err := l2MessageOrm.GetL2Messages(map[string]interface{}{"status": types.MsgPending}, nil, 0)
return err == nil && len(msgs) == 5
}))
}

func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t)
defer database.CloseDB(db)
defer utils.CloseDB(db)

auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])

@@ -79,16 +210,16 @@ func testFetchRunningMissingBlocks(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
blockTraceOrm := orm.NewBlockTrace(db)
ok := cutils.TryTimes(10, func() bool {
latestHeight, err := l2Cli.BlockNumber(context.Background())
if err != nil {
return false
}
wc := prepareWatcherClient(l2Cli, db, address)
wc.TryFetchRunningMissingBlocks(latestHeight)
fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
return err == nil && fetchedHeight == latestHeight
wc.TryFetchRunningMissingBlocks(context.Background(), latestHeight)
fetchedHeight, err := blockTraceOrm.GetL2BlocksLatestHeight()
return err == nil && uint64(fetchedHeight) == latestHeight
})
assert.True(t, ok)
}
@@ -105,7 +236,6 @@ func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.Privat
assert.NoError(t, err)
auth.GasPrice, err = l2Cli.SuggestGasPrice(context.Background())
assert.NoError(t, err)
auth.GasLimit = 500000
return auth
}
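Only the tail of prepareAuth appears in this hunk. With go-ethereum's bind package, constructing such a transactor typically looks like the sketch below (the chain-ID retrieval and error handling are assumptions, not repo code):

```go
package main

import (
	"context"
	"crypto/ecdsa"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func newAuth(ctx context.Context, client *ethclient.Client, key *ecdsa.PrivateKey) (*bind.TransactOpts, error) {
	chainID, err := client.ChainID(ctx)
	if err != nil {
		return nil, err
	}
	auth, err := bind.NewKeyedTransactorWithChainID(key, chainID)
	if err != nil {
		return nil, err
	}
	auth.GasPrice, err = client.SuggestGasPrice(ctx)
	if err != nil {
		return nil, err
	}
	auth.GasLimit = 500000 // matches the fixed limit used above
	return auth, nil
}
```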

@@ -113,9 +243,60 @@ func loopToFetchEvent(subCtx context.Context, watcher *L2WatcherClient) {
go cutils.Loop(subCtx, 2*time.Second, watcher.FetchContractEvent)
}

func testParseBridgeEventLogsL2SentMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db)

logs := []gethTypes.Log{
{
Topics: []common.Hash{
bridgeAbi.L2SentMessageEventSignature,
},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}

convey.Convey("unpack SentMessage log failure", t, func() {
targetErr := errors.New("UnpackLog SentMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr
})
defer patchGuard.Reset()

l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})

convey.Convey("L2SentMessageEventSignature success", t, func() {
tmpSendAddr := common.HexToAddress("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30")
tmpTargetAddr := common.HexToAddress("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
tmpValue := big.NewInt(1000)
tmpMessageNonce := big.NewInt(100)
tmpMessage := []byte("test for L2SentMessageEventSignature")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridgeAbi.L2SentMessageEvent)
tmpOut.Sender = tmpSendAddr
tmpOut.Value = tmpValue
tmpOut.Target = tmpTargetAddr
tmpOut.MessageNonce = tmpMessageNonce
tmpOut.Message = tmpMessage
return nil
})
defer patchGuard.Reset()

l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.Error(t, err)
assert.Empty(t, relayedMessages)
assert.Empty(t, l2Messages)
})
}

func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer database.CloseDB(db)
defer utils.CloseDB(db)

logs := []gethTypes.Log{
{
@@ -132,8 +313,9 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

relayedMessages, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})

@@ -146,8 +328,9 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {
})
defer patchGuard.Reset()

relayedMessages, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
@@ -155,7 +338,7 @@ func testParseBridgeEventLogsL2RelayedMessageEventSignature(t *testing.T) {

func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer database.CloseDB(db)
defer utils.CloseDB(db)

logs := []gethTypes.Log{
{
@@ -172,8 +355,9 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
})
defer patchGuard.Reset()

relayedMessages, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})

@@ -186,9 +370,51 @@ func testParseBridgeEventLogsL2FailedRelayedMessageEventSignature(t *testing.T)
})
defer patchGuard.Reset()

relayedMessages, err := watcher.parseBridgeEventLogs(logs)
l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Len(t, relayedMessages, 1)
assert.Equal(t, relayedMessages[0].msgHash, msgHash)
})
}

func testParseBridgeEventLogsL2AppendMessageEventSignature(t *testing.T) {
watcher, db := setupL2Watcher(t)
defer utils.CloseDB(db)
logs := []gethTypes.Log{
{
Topics: []common.Hash{bridgeAbi.L2AppendMessageEventSignature},
BlockNumber: 100,
TxHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
},
}

convey.Convey("unpack AppendMessage log failure", t, func() {
targetErr := errors.New("UnpackLog AppendMessage failure")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
return targetErr
})
defer patchGuard.Reset()

l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.EqualError(t, err, targetErr.Error())
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})

convey.Convey("L2AppendMessageEventSignature success", t, func() {
msgHash := common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5")
patchGuard := gomonkey.ApplyFunc(utils.UnpackLog, func(c *abi.ABI, out interface{}, event string, log gethTypes.Log) error {
tmpOut := out.(*bridgeAbi.L2AppendMessageEvent)
tmpOut.MessageHash = msgHash
tmpOut.Index = big.NewInt(100)
return nil
})
defer patchGuard.Reset()

l2Messages, relayedMessages, err := watcher.parseBridgeEventLogs(logs)
assert.NoError(t, err)
assert.Empty(t, l2Messages)
assert.Empty(t, relayedMessages)
})
}

@@ -9,13 +9,12 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"

"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"

"scroll-tech/database/migrate"

"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/orm/migrate"
bridgeTypes "scroll-tech/bridge/internal/types"
"scroll-tech/bridge/internal/utils"
)

var (
@@ -28,8 +27,8 @@ var (
l2Cli *ethclient.Client

// block trace
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
wrappedBlock1 *bridgeTypes.WrappedBlock
wrappedBlock2 *bridgeTypes.WrappedBlock
)

func setupEnv(t *testing.T) (err error) {
@@ -41,7 +40,7 @@ func setupEnv(t *testing.T) (err error) {

cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &database.Config{
cfg.DBConfig = &config.DBConfig{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
@@ -57,7 +56,7 @@ func setupEnv(t *testing.T) (err error) {
return err
}
// unmarshal blockTrace
wrappedBlock1 = &types.WrappedBlock{}
wrappedBlock1 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace1, wrappedBlock1); err != nil {
return err
}
@@ -67,7 +66,7 @@ func setupEnv(t *testing.T) (err error) {
return err
}
// unmarshal blockTrace
wrappedBlock2 = &types.WrappedBlock{}
wrappedBlock2 = &bridgeTypes.WrappedBlock{}
if err = json.Unmarshal(templateBlockTrace2, wrappedBlock2); err != nil {
return err
}
@@ -75,7 +74,7 @@ func setupEnv(t *testing.T) (err error) {
}

func setupDB(t *testing.T) *gorm.DB {
db, err := database.InitDB(cfg.DBConfig)
db, err := utils.InitDB(cfg.DBConfig)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -101,18 +100,23 @@ func TestFunction(t *testing.T) {
t.Run("TestL1WatcherClientFetchBlockHeader", testL1WatcherClientFetchBlockHeader)
t.Run("TestL1WatcherClientFetchContractEvent", testL1WatcherClientFetchContractEvent)
t.Run("TestParseBridgeEventLogsL1QueueTransactionEventSignature", testParseBridgeEventLogsL1QueueTransactionEventSignature)
t.Run("TestParseBridgeEventLogsL1RelayedMessageEventSignature", testParseBridgeEventLogsL1RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL1FailedRelayedMessageEventSignature", testParseBridgeEventLogsL1FailedRelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL1CommitBatchEventSignature", testParseBridgeEventLogsL1CommitBatchEventSignature)
t.Run("TestParseBridgeEventLogsL1FinalizeBatchEventSignature", testParseBridgeEventLogsL1FinalizeBatchEventSignature)

// Run l2 watcher test cases.
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
t.Run("TestParseBridgeEventLogsL2SentMessageEventSignature", testParseBridgeEventLogsL2SentMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
t.Run("TestParseBridgeEventLogsL2AppendMessageEventSignature", testParseBridgeEventLogsL2AppendMessageEventSignature)

// Run chunk-proposer test cases.
t.Run("TestChunkProposer", testChunkProposer)

// Run batch-proposer test cases.
t.Run("TestBatchProposer", testBatchProposer)
// Run batch proposer test cases.
t.Run("TestBatchProposerProposeBatch", testBatchProposerProposeBatch)
t.Run("TestBatchProposerBatchGeneration", testBatchProposerBatchGeneration)
t.Run("TestBatchProposerGracefulRestart", testBatchProposerGracefulRestart)
}

@@ -1,411 +0,0 @@
package orm

import (
"context"
"encoding/json"
"errors"
"fmt"
"time"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)

const defaultBatchHeaderVersion = 0

// Batch represents a batch of chunks.
type Batch struct {
db *gorm.DB `gorm:"column:-"`

// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`

// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`

// rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
CommitTxHash string `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
CommittedAt *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`

// gas oracle
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewBatch creates a new Batch database instance.
func NewBatch(db *gorm.DB) *Batch {
return &Batch{db: db}
}

// TableName returns the table name for the Batch model.
func (*Batch) TableName() string {
return "batch"
}

// GetBatches retrieves selected batches from the database.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetBatches(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})

for key, value := range fields {
db = db.Where(key, value)
}

for _, orderBy := range orderByList {
db = db.Order(orderBy)
}

if limit > 0 {
db = db.Limit(limit)
}

db = db.Order("index ASC")

var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatches error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
}
return batches, nil
}

// GetBatchCount retrieves the total number of batches in the database.
func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})

var count int64
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Batch.GetBatchCount error: %w", err)
}
return uint64(count), nil
}

// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.AggProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)

var batch Batch
if err := db.Find(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}

var proof message.AggProof
if err := json.Unmarshal(batch.Proof, &proof); err != nil {
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
return &proof, nil
}

// GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Order("index desc")

var latestBatch Batch
if err := db.First(&latestBatch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
}
return &latestBatch, nil
}

// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 {
return nil, nil
}

db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("hash, rollup_status")
db = db.Where("hash IN ?", hashes)

var batches []Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetRollupStatusByHashList error: %w, hashes: %v", err, hashes)
}

hashToStatusMap := make(map[string]types.RollupStatus)
for _, batch := range batches {
hashToStatusMap[batch.Hash] = types.RollupStatus(batch.RollupStatus)
}

var statuses []types.RollupStatus
for _, hash := range hashes {
status, ok := hashToStatusMap[hash]
if !ok {
return nil, fmt.Errorf("Batch.GetRollupStatusByHashList: hash not found in database: %s", hash)
}
statuses = append(statuses, status)
}

return statuses, nil
}

// GetPendingBatches retrieves pending batches up to the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetPendingBatches(ctx context.Context, limit int) ([]*Batch, error) {
if limit <= 0 {
return nil, errors.New("limit must be greater than zero")
}

db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ?", types.RollupPending)
db = db.Order("index ASC")
db = db.Limit(limit)

var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetPendingBatches error: %w", err)
}
return batches, nil
}

// GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)

var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
}
return &batch, nil
}

// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 {
return nil, errors.New("invalid args")
}

parentBatch, err := o.GetLatestBatch(ctx)
if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
log.Error("failed to get the latest batch", "err", err)
return nil, err
}

var batchIndex uint64
var parentBatchHash common.Hash
var totalL1MessagePoppedBefore uint64
var version uint8 = defaultBatchHeaderVersion

// If parentBatch == nil then err == gorm.ErrRecordNotFound, which means there's
// no batch record in the db, so we use default empty values for the new batch;
// if parentBatch != nil then err == nil, and we fill the parent-batch-related data into the new batch.
if parentBatch != nil {
batchIndex = parentBatch.Index + 1
parentBatchHash = common.HexToHash(parentBatch.Hash)

var parentBatchHeader *types.BatchHeader
parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
if err != nil {
log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
return nil, err
}

totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped()
version = parentBatchHeader.Version()
}

batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
if err != nil {
log.Error("failed to create batch header",
"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
"parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err)
return nil, err
}

numChunks := len(chunks)
lastChunkBlockNum := len(chunks[numChunks-1].Blocks)

newBatch := Batch{
Index: batchIndex,
Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawTrieRoot.Hex(),
BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
}

db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})

if err := db.Create(&newBatch).Error; err != nil {
log.Error("failed to insert batch", "batch", newBatch, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
return &newBatch, nil
}
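InsertBatch takes an optional dbTX so callers can compose the insert with other writes atomically. A hedged usage sketch with GORM's Transaction helper (the function name and chunk arguments are placeholders, not repo code):

```go
package orm

import (
	"context"

	"gorm.io/gorm"

	"scroll-tech/common/types"
)

// insertBatchAtomically sketches the dbTX pattern: every write inside the
// closure commits or rolls back together.
func insertBatchAtomically(ctx context.Context, db *gorm.DB, batchOrm *Batch, chunks []*types.Chunk) error {
	return db.Transaction(func(tx *gorm.DB) error {
		batch, err := batchOrm.InsertBatch(ctx, 0, 1, "startChunkHash", "endChunkHash", chunks, tx)
		if err != nil {
			return err // returning an error rolls back the whole transaction
		}
		// Further writes that must commit atomically with the new batch reuse
		// the same tx here, e.g. stamping the chunks with batch.Hash.
		_ = batch
		return nil
	})
}
```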

// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{})
updateFields["oracle_status"] = int(status)
updateFields["oracle_tx_hash"] = txHash

db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)

if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
}
return nil
}

// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)

switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}

db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)

if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}

// UpdateRollupStatus updates the rollup status of a batch.
func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["rollup_status"] = int(status)

switch status {
case types.RollupCommitted:
updateFields["committed_at"] = time.Now()
case types.RollupFinalized:
updateFields["finalized_at"] = time.Now()
}

db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)

if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateRollupStatus error: %w, batch hash: %v, status: %v", err, hash, status.String())
}
return nil
}

// UpdateCommitTxHashAndRollupStatus updates the commit transaction hash and rollup status for a batch.
func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
updateFields := make(map[string]interface{})
updateFields["commit_tx_hash"] = commitTxHash
updateFields["rollup_status"] = int(status)
if status == types.RollupCommitted {
updateFields["committed_at"] = time.Now()
}

db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)

if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), commitTxHash)
}
return nil
}

// UpdateFinalizeTxHashAndRollupStatus updates the finalize transaction hash and rollup status for a batch.
func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
updateFields := make(map[string]interface{})
updateFields["finalize_tx_hash"] = finalizeTxHash
updateFields["rollup_status"] = int(status)
if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now()
}

db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
|
||||
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateProofByHash updates the batch proof by hash.
|
||||
// for unit test.
|
||||
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
|
||||
proofBytes, err := json.Marshal(proof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
|
||||
}
|
||||
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["proof"] = proofBytes
|
||||
updateFields["proof_time_sec"] = proofTimeSec
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("hash", hash)
|
||||
|
||||
if err = db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
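The variadic dbTX parameter on these writers lets a caller thread one gorm transaction through several ORM calls; when it is omitted, each call runs on its own connection. A hedged sketch of the intended usage (the exact InsertBatch parameter list is an assumption inferred from the fields it populates):

	err := db.Transaction(func(tx *gorm.DB) error {
		// Insert the batch and link its chunks atomically: if either
		// write fails, gorm rolls the whole transaction back.
		dbBatch, err := batchOrm.InsertBatch(ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, tx)
		if err != nil {
			return err
		}
		return chunkOrm.UpdateBatchHashInRange(ctx, startChunkIndex, endChunkIndex, dbBatch.Hash, tx)
	})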
292
bridge/internal/orm/block_batch.go
Normal file
@@ -0,0 +1,292 @@
package orm

import (
	"context"
	"errors"
	"time"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"

	bridgeTypes "scroll-tech/bridge/internal/types"
)

// BlockBatch is the structure of a stored block batch message
type BlockBatch struct {
	db *gorm.DB `gorm:"column:-"`

	Hash                string     `json:"hash" gorm:"column:hash"`
	Index               uint64     `json:"index" gorm:"column:index"`
	StartBlockNumber    uint64     `json:"start_block_number" gorm:"column:start_block_number"`
	StartBlockHash      string     `json:"start_block_hash" gorm:"column:start_block_hash"`
	EndBlockNumber      uint64     `json:"end_block_number" gorm:"column:end_block_number"`
	EndBlockHash        string     `json:"end_block_hash" gorm:"column:end_block_hash"`
	ParentHash          string     `json:"parent_hash" gorm:"column:parent_hash"`
	StateRoot           string     `json:"state_root" gorm:"column:state_root"`
	TotalTxNum          uint64     `json:"total_tx_num" gorm:"column:total_tx_num"`
	TotalL1TxNum        uint64     `json:"total_l1_tx_num" gorm:"column:total_l1_tx_num"`
	TotalL2Gas          uint64     `json:"total_l2_gas" gorm:"column:total_l2_gas"`
	ProvingStatus       int        `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof               []byte     `json:"proof" gorm:"column:proof"`
	InstanceCommitments []byte     `json:"instance_commitments" gorm:"column:instance_commitments"`
	ProofTimeSec        uint64     `json:"proof_time_sec" gorm:"column:proof_time_sec;default:0"`
	RollupStatus        int        `json:"rollup_status" gorm:"column:rollup_status;default:1"`
	CommitTxHash        string     `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
	OracleStatus        int        `json:"oracle_status" gorm:"column:oracle_status;default:1"`
	OracleTxHash        string     `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
	FinalizeTxHash      string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
	CreatedAt           time.Time  `json:"created_at" gorm:"column:created_at;default:CURRENT_TIMESTAMP()"`
	ProverAssignedAt    *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt            *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	CommittedAt         *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
	FinalizedAt         *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
}

// NewBlockBatch creates a BlockBatch orm instance
func NewBlockBatch(db *gorm.DB) *BlockBatch {
	return &BlockBatch{db: db}
}

// TableName defines the BlockBatch table name
func (*BlockBatch) TableName() string {
	return "block_batch"
}

// GetBatchCount gets the batch count
func (o *BlockBatch) GetBatchCount() (int64, error) {
	var count int64
	if err := o.db.Model(&BlockBatch{}).Count(&count).Error; err != nil {
		return 0, err
	}
	return count, nil
}

// GetBlockBatches gets the selected block batches
func (o *BlockBatch) GetBlockBatches(fields map[string]interface{}, orderByList []string, limit int) ([]BlockBatch, error) {
	var blockBatches []BlockBatch
	db := o.db
	for key, value := range fields {
		db = db.Where(key, value)
	}

	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}

	if limit != 0 {
		db = db.Limit(limit)
	}

	if err := db.Find(&blockBatches).Error; err != nil {
		return nil, err
	}
	return blockBatches, nil
}

// GetBlockBatchesHashByRollupStatus gets the hashes of block batches with the given rollup status
func (o *BlockBatch) GetBlockBatchesHashByRollupStatus(status types.RollupStatus, limit int) ([]string, error) {
	var blockBatches []BlockBatch
	err := o.db.Select("hash").Where("rollup_status", int(status)).Order("index ASC").Limit(limit).Find(&blockBatches).Error
	if err != nil {
		return nil, err
	}

	var hashes []string
	for _, v := range blockBatches {
		hashes = append(hashes, v.Hash)
	}
	return hashes, nil
}

// GetVerifiedProofAndInstanceCommitmentsByHash gets the verified proof and instance commitments by hash
func (o *BlockBatch) GetVerifiedProofAndInstanceCommitmentsByHash(hash string) ([]byte, []byte, error) {
	var blockBatch BlockBatch
	err := o.db.Select("proof, instance_commitments").Where("hash", hash).Where("proving_status", int(types.ProvingTaskVerified)).Find(&blockBatch).Error
	if err != nil {
		return nil, nil, err
	}
	return blockBatch.Proof, blockBatch.InstanceCommitments, nil
}

// GetLatestBatch gets the latest batch.
// Because we call `initializeGenesis()` when starting the `L2Watcher`, a batch must exist.
func (o *BlockBatch) GetLatestBatch() (*BlockBatch, error) {
	var blockBatch BlockBatch
	err := o.db.Order("index DESC").Limit(1).First(&blockBatch).Error
	if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, err
	}
	return &blockBatch, nil
}

// GetLatestBatchByRollupStatus gets the latest block batch with one of the given rollup statuses
func (o *BlockBatch) GetLatestBatchByRollupStatus(rollupStatuses []types.RollupStatus) (*BlockBatch, error) {
	var tmpRollupStatus []int
	for _, v := range rollupStatuses {
		tmpRollupStatus = append(tmpRollupStatus, int(v))
	}
	var blockBatch BlockBatch
	err := o.db.Where("rollup_status IN (?)", tmpRollupStatus).Order("index DESC").Limit(1).First(&blockBatch).Error
	if err != nil {
		return nil, err
	}
	return &blockBatch, nil
}

// GetRollupStatusByHashList gets the rollup statuses for the given hash list
func (o *BlockBatch) GetRollupStatusByHashList(hashes []string) ([]types.RollupStatus, error) {
	if len(hashes) == 0 {
		return nil, nil
	}

	var blockBatches []BlockBatch
	err := o.db.Select("hash, rollup_status").Where("hash IN (?)", hashes).Find(&blockBatches).Error
	if err != nil {
		return nil, err
	}

	var statuses []types.RollupStatus
	for _, v := range blockBatches {
		statuses = append(statuses, types.RollupStatus(v.RollupStatus))
	}
	return statuses, nil
}

// InsertBlockBatchByBatchData inserts a block batch row built from the given BatchData
func (o *BlockBatch) InsertBlockBatchByBatchData(tx *gorm.DB, batchData *bridgeTypes.BatchData) (int64, error) {
	var db *gorm.DB
	if tx != nil {
		db = tx
	} else {
		db = o.db
	}

	numBlocks := len(batchData.Batch.Blocks)
	insertBlockBatch := BlockBatch{
		Hash:             batchData.Hash().Hex(),
		Index:            batchData.Batch.BatchIndex,
		StartBlockNumber: batchData.Batch.Blocks[0].BlockNumber,
		StartBlockHash:   batchData.Batch.Blocks[0].BlockHash.Hex(),
		EndBlockNumber:   batchData.Batch.Blocks[numBlocks-1].BlockNumber,
		EndBlockHash:     batchData.Batch.Blocks[numBlocks-1].BlockHash.Hex(),
		ParentHash:       batchData.Batch.ParentBatchHash.Hex(),
		StateRoot:        batchData.Batch.NewStateRoot.Hex(),
		TotalTxNum:       batchData.TotalTxNum,
		TotalL1TxNum:     batchData.TotalL1TxNum,
		TotalL2Gas:       batchData.TotalL2Gas,
		CreatedAt:        time.Now(),
	}
	result := db.Create(&insertBlockBatch)
	if result.Error != nil {
		log.Error("failed to insert block batch by batchData", "err", result.Error)
		return 0, result.Error
	}
	return result.RowsAffected, nil
}

// UpdateProvingStatus updates the proving status
func (o *BlockBatch) UpdateProvingStatus(hash string, status types.ProvingStatus) error {
	updateFields := make(map[string]interface{})
	updateFields["proving_status"] = int(status)

	switch status {
	case types.ProvingTaskAssigned:
		updateFields["prover_assigned_at"] = time.Now()
	case types.ProvingTaskUnassigned:
		updateFields["prover_assigned_at"] = nil
	case types.ProvingTaskProved, types.ProvingTaskVerified:
		updateFields["proved_at"] = time.Now()
	default:
	}

	if err := o.db.Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
}

// UpdateRollupStatus updates the rollup status
func (o *BlockBatch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error {
	updateFields := make(map[string]interface{})
	updateFields["rollup_status"] = int(status)

	switch status {
	case types.RollupCommitted:
		updateFields["committed_at"] = time.Now()
	case types.RollupFinalized:
		updateFields["finalized_at"] = time.Now()
	}
	if err := o.db.Model(&BlockBatch{}).WithContext(ctx).Where("hash", hash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
}

// UpdateSkippedBatches updates the skipped batches
func (o *BlockBatch) UpdateSkippedBatches() (int64, error) {
	provingStatusList := []interface{}{
		int(types.ProvingTaskSkipped),
		int(types.ProvingTaskFailed),
	}
	result := o.db.Model(&BlockBatch{}).Where("rollup_status", int(types.RollupCommitted)).
		Where("proving_status IN (?)", provingStatusList).Update("rollup_status", int(types.RollupFinalizationSkipped))
	if result.Error != nil {
		return 0, result.Error
	}
	return result.RowsAffected, nil
}

// UpdateCommitTxHashAndRollupStatus updates the commit tx hash and rollup status
func (o *BlockBatch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus) error {
	updateFields := make(map[string]interface{})
	updateFields["commit_tx_hash"] = commitTxHash
	updateFields["rollup_status"] = int(status)
	if status == types.RollupCommitted {
		updateFields["committed_at"] = time.Now()
	}
	if err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
}

// UpdateFinalizeTxHashAndRollupStatus updates the finalize tx hash and rollup status
func (o *BlockBatch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
	updateFields := make(map[string]interface{})
	updateFields["finalize_tx_hash"] = finalizeTxHash
	updateFields["rollup_status"] = int(status)
	if status == types.RollupFinalized {
		updateFields["finalized_at"] = time.Now()
	}
	if err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
}

// UpdateL2GasOracleStatusAndOracleTxHash updates the l2 gas oracle status and oracle tx hash
func (o *BlockBatch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
	updateFields := make(map[string]interface{})
	updateFields["oracle_status"] = int(status)
	updateFields["oracle_tx_hash"] = txHash
	if err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
}

// UpdateProofByHash updates the block batch proof by hash.
// Only used in unit tests.
func (o *BlockBatch) UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
	updateFields := make(map[string]interface{})
	updateFields["proof"] = proof
	updateFields["instance_commitments"] = instanceCommitments
	updateFields["proof_time_sec"] = proofTimeSec
	err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error
	if err != nil {
		log.Error("failed to update proof", "err", err)
	}
	return err
}
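These query helpers take a column/value map plus an order-by list instead of raw SQL. A short hedged sketch of how a caller might use them (the surrounding *gorm.DB handle is assumed):

	blockBatchOrm := orm.NewBlockBatch(db)
	// Up to 10 committed batches, oldest first.
	batches, err := blockBatchOrm.GetBlockBatches(
		map[string]interface{}{"rollup_status": int(types.RollupCommitted)},
		[]string{"index ASC"},
		10,
	)
	if err != nil {
		log.Error("failed to get block batches", "err", err)
	}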
155
bridge/internal/orm/block_trace.go
Normal file
@@ -0,0 +1,155 @@
package orm

import (
	"encoding/json"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/bridge/internal/types"
)

// BlockTrace is the structure of a stored block trace message
type BlockTrace struct {
	db *gorm.DB `gorm:"column:-"`

	Number         uint64 `json:"number" gorm:"number"`
	Hash           string `json:"hash" gorm:"hash"`
	ParentHash     string `json:"parent_hash" gorm:"parent_hash"`
	Trace          string `json:"trace" gorm:"column:trace"`
	BatchHash      string `json:"batch_hash" gorm:"batch_hash;default:NULL"`
	TxNum          uint64 `json:"tx_num" gorm:"tx_num"`
	GasUsed        uint64 `json:"gas_used" gorm:"gas_used"`
	BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
}

// NewBlockTrace creates a BlockTrace orm instance
func NewBlockTrace(db *gorm.DB) *BlockTrace {
	return &BlockTrace{db: db}
}

// TableName defines the BlockTrace table name
func (*BlockTrace) TableName() string {
	return "block_trace"
}

// GetL2BlocksLatestHeight gets the latest l2 block height
func (o *BlockTrace) GetL2BlocksLatestHeight() (int64, error) {
	result := o.db.Model(&BlockTrace{}).Select("COALESCE(MAX(number), -1)").Row()
	if result.Err() != nil {
		return -1, result.Err()
	}
	var maxNumber int64
	if err := result.Scan(&maxNumber); err != nil {
		return -1, err
	}
	return maxNumber, nil
}

// GetL2WrappedBlocks gets the l2 wrapped blocks
func (o *BlockTrace) GetL2WrappedBlocks(fields map[string]interface{}) ([]*types.WrappedBlock, error) {
	var blockTraces []BlockTrace
	db := o.db.Select("trace")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	if err := db.Find(&blockTraces).Error; err != nil {
		return nil, err
	}

	var wrappedBlocks []*types.WrappedBlock
	for _, v := range blockTraces {
		var wrappedBlock types.WrappedBlock
		// stop at the first trace that fails to decode
		if err := json.Unmarshal([]byte(v.Trace), &wrappedBlock); err != nil {
			break
		}
		wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
	}
	return wrappedBlocks, nil
}

// GetL2BlockInfos gets l2 block infos
func (o *BlockTrace) GetL2BlockInfos(fields map[string]interface{}, orderByList []string, limit int) ([]BlockTrace, error) {
	var blockTraces []BlockTrace
	db := o.db.Select("number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp")
	for key, value := range fields {
		db = db.Where(key, value)
	}

	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}

	if limit != 0 {
		db = db.Limit(limit)
	}

	if err := db.Find(&blockTraces).Error; err != nil {
		return nil, err
	}
	return blockTraces, nil
}

// GetUnbatchedL2Blocks gets the unbatched l2 blocks
func (o *BlockTrace) GetUnbatchedL2Blocks(fields map[string]interface{}, orderByList []string, limit int) ([]BlockTrace, error) {
	var unbatchedBlockTraces []BlockTrace
	db := o.db.Select("number, hash, parent_hash, batch_hash, tx_num, gas_used, block_timestamp").Where("batch_hash IS NULL")
	for key, value := range fields {
		db = db.Where(key, value)
	}
	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}
	if limit != 0 {
		db = db.Limit(limit)
	}
	if err := db.Find(&unbatchedBlockTraces).Error; err != nil {
		return nil, err
	}
	return unbatchedBlockTraces, nil
}

// InsertWrappedBlocks inserts wrapped blocks into the block trace table
func (o *BlockTrace) InsertWrappedBlocks(blocks []*types.WrappedBlock) error {
	var blockTraces []BlockTrace
	for _, block := range blocks {
		number := block.Header.Number.Uint64()
		hash := block.Header.Hash().String()
		txNum := len(block.Transactions)
		mtime := block.Header.Time
		gasCost := block.Header.GasUsed

		data, err := json.Marshal(block)
		if err != nil {
			log.Error("failed to marshal block", "hash", hash, "err", err)
			return err
		}

		tmpBlockTrace := BlockTrace{
			Number:         number,
			Hash:           hash,
			ParentHash:     block.Header.ParentHash.String(),
			Trace:          string(data),
			TxNum:          uint64(txNum),
			GasUsed:        gasCost,
			BlockTimestamp: mtime,
		}
		blockTraces = append(blockTraces, tmpBlockTrace)
	}

	if err := o.db.Create(&blockTraces).Error; err != nil {
		log.Error("failed to insert blockTraces", "err", err)
		return err
	}
	return nil
}

// UpdateBatchHashForL2Blocks updates the batch_hash of the given block trace rows
func (o *BlockTrace) UpdateBatchHashForL2Blocks(tx *gorm.DB, numbers []uint64, batchHash string) error {
	var db *gorm.DB
	if tx != nil {
		db = tx
	} else {
		db = o.db
	}

	err := db.Model(&BlockTrace{}).Where("number IN (?)", numbers).Update("batch_hash", batchHash).Error
	if err != nil {
		return err
	}
	return nil
}
@@ -1,227 +0,0 @@
package orm

import (
	"context"
	"errors"
	"fmt"
	"time"

	"scroll-tech/common/types"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"
)

// Chunk represents a chunk of blocks in the database.
type Chunk struct {
	db *gorm.DB `gorm:"-"`

	// chunk
	Index                        uint64 `json:"index" gorm:"column:index"`
	Hash                         string `json:"hash" gorm:"column:hash"`
	StartBlockNumber             uint64 `json:"start_block_number" gorm:"column:start_block_number"`
	StartBlockHash               string `json:"start_block_hash" gorm:"column:start_block_hash"`
	EndBlockNumber               uint64 `json:"end_block_number" gorm:"column:end_block_number"`
	EndBlockHash                 string `json:"end_block_hash" gorm:"column:end_block_hash"`
	StartBlockTime               uint64 `json:"start_block_time" gorm:"column:start_block_time"`
	TotalL1MessagesPoppedBefore  uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
	TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`

	// proof
	ProvingStatus    int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof            []byte     `json:"proof" gorm:"column:proof;default:NULL"`
	ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt         *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	ProofTimeSec     int32      `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`

	// batch
	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

	// metadata
	TotalL2TxGas              uint64         `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
	TotalL2TxNum              uint32         `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
	TotalL1CommitCalldataSize uint32         `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
	TotalL1CommitGas          uint64         `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
	CreatedAt                 time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt                 time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt                 gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewChunk creates a new Chunk database instance.
func NewChunk(db *gorm.DB) *Chunk {
	return &Chunk{db: db}
}

// TableName returns the table name for the chunk model.
func (*Chunk) TableName() string {
	return "chunk"
}

// GetChunksInRange retrieves chunks within a given range (inclusive) from the database.
// The range is closed, i.e., it includes both start and end indices.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endIndex uint64) ([]*Chunk, error) {
	if startIndex > endIndex {
		return nil, fmt.Errorf("Chunk.GetChunksInRange: start index should be less than or equal to end index, start index: %v, end index: %v", startIndex, endIndex)
	}

	db := o.db.WithContext(ctx)
	db = db.Model(&Chunk{})
	db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
	db = db.Order("index ASC")

	var chunks []*Chunk
	if err := db.Find(&chunks).Error; err != nil {
		return nil, fmt.Errorf("Chunk.GetChunksInRange error: %w, start index: %v, end index: %v", err, startIndex, endIndex)
	}

	// sanity check
	if uint64(len(chunks)) != endIndex-startIndex+1 {
		return nil, fmt.Errorf("Chunk.GetChunksInRange: incorrect number of chunks, expected: %v, got: %v, start index: %v, end index: %v", endIndex-startIndex+1, len(chunks), startIndex, endIndex)
	}

	return chunks, nil
}

// GetUnbatchedChunks retrieves unbatched chunks from the database.
func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Chunk{})
	db = db.Where("batch_hash IS NULL")
	db = db.Order("index asc")

	var chunks []*Chunk
	if err := db.Find(&chunks).Error; err != nil {
		return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
	}
	return chunks, nil
}

// GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Chunk{})
	db = db.Order("index desc")

	var latestChunk Chunk
	if err := db.First(&latestChunk).Error; err != nil {
		return nil, fmt.Errorf("Chunk.GetLatestChunk error: %w", err)
	}
	return &latestChunk, nil
}

// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
	if chunk == nil || len(chunk.Blocks) == 0 {
		return nil, errors.New("invalid args")
	}

	var chunkIndex uint64
	var totalL1MessagePoppedBefore uint64
	parentChunk, err := o.GetLatestChunk(ctx)
	if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
		log.Error("failed to get latest chunk", "err", err)
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
	}

	// If parentChunk==nil then err==gorm.ErrRecordNotFound, meaning there is no chunk
	// record in the db yet, so we use default empty values for the new chunk;
	// if parentChunk!=nil then err==nil, and we fill the parentChunk-related data into the new chunk.
	if parentChunk != nil {
		chunkIndex = parentChunk.Index + 1
		totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
	}

	hash, err := chunk.Hash(totalL1MessagePoppedBefore)
	if err != nil {
		log.Error("failed to get chunk hash", "err", err)
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
	}

	var totalL2TxGas uint64
	var totalL2TxNum uint64
	var totalL1CommitCalldataSize uint64
	var totalL1CommitGas uint64
	for _, block := range chunk.Blocks {
		totalL2TxGas += block.Header.GasUsed
		totalL2TxNum += block.L2TxsNum()
		totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
		totalL1CommitGas += block.EstimateL1CommitGas()
	}

	numBlocks := len(chunk.Blocks)
	newChunk := Chunk{
		Index:                        chunkIndex,
		Hash:                         hash.Hex(),
		StartBlockNumber:             chunk.Blocks[0].Header.Number.Uint64(),
		StartBlockHash:               chunk.Blocks[0].Header.Hash().Hex(),
		EndBlockNumber:               chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
		EndBlockHash:                 chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
		TotalL2TxGas:                 totalL2TxGas,
		TotalL2TxNum:                 uint32(totalL2TxNum),
		TotalL1CommitCalldataSize:    uint32(totalL1CommitCalldataSize),
		TotalL1CommitGas:             totalL1CommitGas,
		StartBlockTime:               chunk.Blocks[0].Header.Time,
		TotalL1MessagesPoppedBefore:  totalL1MessagePoppedBefore,
		TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
		ProvingStatus:                int16(types.ProvingTaskUnassigned),
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Chunk{})

	if err := db.Create(&newChunk).Error; err != nil {
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash)
	}

	return &newChunk, nil
}

// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
	updateFields := make(map[string]interface{})
	updateFields["proving_status"] = int(status)

	switch status {
	case types.ProvingTaskAssigned:
		updateFields["prover_assigned_at"] = time.Now()
	case types.ProvingTaskUnassigned:
		updateFields["prover_assigned_at"] = nil
	case types.ProvingTaskProved, types.ProvingTaskVerified:
		updateFields["proved_at"] = time.Now()
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Chunk{})
	db = db.Where("hash", hash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Chunk.UpdateProvingStatus error: %w, chunk hash: %v, status: %v", err, hash, status.String())
	}
	return nil
}

// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Chunk{})
	db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)

	if err := db.Update("batch_hash", batchHash).Error; err != nil {
		return fmt.Errorf("Chunk.UpdateBatchHashInRange error: %w, start index: %v, end index: %v, batch hash: %v", err, startIndex, endIndex, batchHash)
	}
	return nil
}
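The ErrRecordNotFound handling in InsertChunk is worth a note: GetLatestChunk wraps the gorm error with %w exactly once, so the caller unwraps one level before comparing. A minimal sketch of the same pattern, assuming only the standard errors package and gorm:

	latest, err := chunkOrm.GetLatestChunk(ctx) // wraps the gorm error via fmt.Errorf("...: %w", err)
	if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
		return err // a real failure
	}
	// Here err is nil, or the table is simply empty: fall through and
	// bootstrap the first chunk with index 0 and zero messages popped.
	_ = latest

Since errors.Is already walks the wrap chain, errors.Is(err, gorm.ErrRecordNotFound) would behave the same here; the explicit Unwrap mirrors the code above.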
38
bridge/internal/orm/common.go
Normal file
@@ -0,0 +1,38 @@
package orm

import (
	"errors"

	"gorm.io/gorm"

	bridgeTypes "scroll-tech/bridge/internal/types"
)

// AddBatchInfoToDB inserts the batch information into the BlockBatch table and updates the batch_hash
// of all blocks included in the batch.
func AddBatchInfoToDB(db *gorm.DB, batchData *bridgeTypes.BatchData) error {
	blockBatch := NewBlockBatch(db)
	blockTrace := NewBlockTrace(db)
	err := db.Transaction(func(tx *gorm.DB) error {
		rowsAffected, dbTxErr := blockBatch.InsertBlockBatchByBatchData(tx, batchData)
		if dbTxErr != nil {
			return dbTxErr
		}
		if rowsAffected != 1 {
			dbTxErr = errors.New("the InsertBlockBatchByBatchData affected row is not 1")
			return dbTxErr
		}

		var blockIDs = make([]uint64, len(batchData.Batch.Blocks))
		for i, block := range batchData.Batch.Blocks {
			blockIDs[i] = block.BlockNumber
		}

		dbTxErr = blockTrace.UpdateBatchHashForL2Blocks(tx, blockIDs, batchData.Hash().Hex())
		if dbTxErr != nil {
			return dbTxErr
		}
		return nil
	})
	return err
}
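Callers get all-or-nothing semantics from this helper, because returning a non-nil error from the closure makes gorm roll the transaction back. A hedged sketch of invoking it (construction of batchData is assumed):

	if err := orm.AddBatchInfoToDB(db, batchData); err != nil {
		// Neither the block_batch row nor the batch_hash updates were applied.
		log.Error("failed to add batch info to db", "batch hash", batchData.Hash().Hex(), "err", err)
		return err
	}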
@@ -2,9 +2,8 @@ package orm

import (
	"context"
	"fmt"
	"time"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"
@@ -14,19 +13,14 @@ import (
type L1Block struct {
	db *gorm.DB `gorm:"column:-"`

	// block
	Number uint64 `json:"number" gorm:"column:number"`
	Hash string `json:"hash" gorm:"column:hash"`
	BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`

	// oracle
	GasOracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
	Number uint64 `json:"number" gorm:"column:number"`
	Hash string `json:"hash" gorm:"column:hash"`
	HeaderRLP string `json:"header_rlp" gorm:"column:header_rlp"`
	BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
	BlockStatus int `json:"block_status" gorm:"column:block_status;default:1"`
	ImportTxHash string `json:"import_tx_hash" gorm:"column:import_tx_hash;default:NULL"`
	GasOracleStatus int `json:"oracle_status" gorm:"column:oracle_status;default:1"`
	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

	// metadata
	CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewL1Block creates an L1Block instance
@@ -40,64 +34,54 @@ func (*L1Block) TableName() string {
}

// GetLatestL1BlockHeight gets the latest l1 block height
func (o *L1Block) GetLatestL1BlockHeight(ctx context.Context) (uint64, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&L1Block{})
	db = db.Select("COALESCE(MAX(number), 0)")
func (l *L1Block) GetLatestL1BlockHeight() (uint64, error) {
	result := l.db.Model(&L1Block{}).Select("COALESCE(MAX(number), 0)").Row()
	if result.Err() != nil {
		return 0, result.Err()
	}

	var maxNumber uint64
	if err := db.Row().Scan(&maxNumber); err != nil {
		return 0, fmt.Errorf("L1Block.GetLatestL1BlockHeight error: %w", err)
	if err := result.Scan(&maxNumber); err != nil {
		return 0, err
	}
	return maxNumber, nil
}

// GetL1Blocks gets the l1 blocks
func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}) ([]L1Block, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&L1Block{})

func (l *L1Block) GetL1Blocks(fields map[string]interface{}) ([]L1Block, error) {
	var l1Blocks []L1Block
	db := l.db
	for key, value := range fields {
		db = db.Where(key, value)
	}

	db = db.Order("number ASC")

	var l1Blocks []L1Block
	if err := db.Find(&l1Blocks).Error; err != nil {
		return nil, fmt.Errorf("L1Block.GetL1Blocks error: %w, fields: %v", err, fields)
		return nil, err
	}
	return l1Blocks, nil
}

// InsertL1Blocks batch inserts l1 blocks
func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
func (l *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {
	if len(blocks) == 0 {
		return nil
	}

	db := o.db.WithContext(ctx)
	db = db.Model(&L1Block{})

	if err := db.Create(&blocks).Error; err != nil {
		return fmt.Errorf("L1Block.InsertL1Blocks error: %w", err)
	err := l.db.WithContext(ctx).Create(&blocks).Error
	if err != nil {
		log.Error("failed to insert L1 Blocks", "err", err)
	}
	return nil
	return err
}

// UpdateL1GasOracleStatusAndOracleTxHash updates the l1 gas oracle status and oracle tx hash
func (o *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
func (l *L1Block) UpdateL1GasOracleStatusAndOracleTxHash(ctx context.Context, blockHash string, status types.GasOracleStatus, txHash string) error {
	updateFields := map[string]interface{}{
		"oracle_status": int(status),
		"oracle_tx_hash": txHash,
	}

	db := o.db.WithContext(ctx)
	db = db.Model(&L1Block{})
	db = db.Where("hash", blockHash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("L1Block.UpdateL1GasOracleStatusAndOracleTxHash error: %w, block hash: %v, status: %v, tx hash: %v", err, blockHash, status.String(), txHash)
	if err := l.db.WithContext(ctx).Model(&L1Block{}).Where("hash", blockHash).Updates(updateFields).Error; err != nil {
		return err
	}
	return nil
}

@@ -3,7 +3,6 @@ package orm
import (
	"context"
	"database/sql"
	"time"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"
@@ -26,11 +25,6 @@ type L1Message struct {
	Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash"`
	Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:NULL"`
	Status int `json:"status" gorm:"column:status;default:1"`

	// metadata
	CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewL1Message creates an L1MessageOrm instance
@@ -58,16 +52,6 @@ func (m *L1Message) GetLayer1LatestWatchedHeight() (int64, error) {
	return -1, nil
}

// GetLayer1LatestMessageWithLayer2Hash returns the latest l1 message with a layer2 hash
func (m *L1Message) GetLayer1LatestMessageWithLayer2Hash() (*L1Message, error) {
	var msg *L1Message
	err := m.db.Where("layer2_hash IS NOT NULL").Order("queue_index DESC").First(&msg).Error
	if err != nil {
		return nil, err
	}
	return msg, nil
}

// GetL1MessagesByStatus fetches a list of unprocessed messages with the given msg status
func (m *L1Message) GetL1MessagesByStatus(status types.MsgStatus, limit uint64) ([]L1Message, error) {
	var msgs []L1Message

@@ -1,233 +0,0 @@
package orm

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"
)

// L2Block represents an l2 block in the database.
type L2Block struct {
	db *gorm.DB `gorm:"column:-"`

	// block
	Number           uint64 `json:"number" gorm:"number"`
	Hash             string `json:"hash" gorm:"hash"`
	ParentHash       string `json:"parent_hash" gorm:"parent_hash"`
	Header           string `json:"header" gorm:"header"`
	Transactions     string `json:"transactions" gorm:"transactions"`
	WithdrawTrieRoot string `json:"withdraw_trie_root" gorm:"withdraw_trie_root"`
	TxNum            uint32 `json:"tx_num" gorm:"tx_num"`
	GasUsed          uint64 `json:"gas_used" gorm:"gas_used"`
	BlockTimestamp   uint64 `json:"block_timestamp" gorm:"block_timestamp"`

	// chunk
	ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`

	// metadata
	CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewL2Block creates a new L2Block instance
func NewL2Block(db *gorm.DB) *L2Block {
	return &L2Block{db: db}
}

// TableName returns the name of the "l2_block" table.
func (*L2Block) TableName() string {
	return "l2_block"
}

// GetL2BlocksLatestHeight retrieves the height of the latest L2 block.
// If the l2_block table is empty, it returns 0 to represent the genesis block height.
func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&L2Block{})
	db = db.Select("COALESCE(MAX(number), 0)")

	var maxNumber uint64
	if err := db.Row().Scan(&maxNumber); err != nil {
		return 0, fmt.Errorf("L2Block.GetL2BlocksLatestHeight error: %w", err)
	}
	return maxNumber, nil
}

// GetUnchunkedBlocks gets the l2 blocks that have not been put into a chunk.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&L2Block{})
	db = db.Select("header, transactions, withdraw_trie_root")
	db = db.Where("chunk_hash IS NULL")
	db = db.Order("number ASC")

	var l2Blocks []L2Block
	if err := db.Find(&l2Blocks).Error; err != nil {
		return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
	}

	var wrappedBlocks []*types.WrappedBlock
	for _, v := range l2Blocks {
		var wrappedBlock types.WrappedBlock

		if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
			return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
		}

		wrappedBlock.Header = &gethTypes.Header{}
		if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
			return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
		}

		wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
		wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
	}

	return wrappedBlocks, nil
}

// GetL2Blocks retrieves selected L2Blocks from the database.
// The returned L2Blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2Blocks(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*L2Block, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&L2Block{})

	for key, value := range fields {
		db = db.Where(key, value)
	}

	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}

	if limit > 0 {
		db = db.Limit(limit)
	}

	db = db.Order("number ASC")

	var l2Blocks []*L2Block
	if err := db.Find(&l2Blocks).Error; err != nil {
		return nil, fmt.Errorf("L2Block.GetL2Blocks error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
	}
	return l2Blocks, nil
}

// GetL2BlocksInRange retrieves the L2 blocks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end block numbers.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*types.WrappedBlock, error) {
	if startBlockNumber > endBlockNumber {
		return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber)
	}

	db := o.db.WithContext(ctx)
	db = db.Model(&L2Block{})
	db = db.Select("header, transactions, withdraw_trie_root")
	db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
	db = db.Order("number ASC")

	var l2Blocks []L2Block
	if err := db.Find(&l2Blocks).Error; err != nil {
		return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
	}

	// sanity check
	if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
		return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks))
	}

	var wrappedBlocks []*types.WrappedBlock
	for _, v := range l2Blocks {
		var wrappedBlock types.WrappedBlock

		if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
			return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
		}

		wrappedBlock.Header = &gethTypes.Header{}
		if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
			return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
		}

		wrappedBlock.WithdrawTrieRoot = common.HexToHash(v.WithdrawTrieRoot)
		wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
	}

	return wrappedBlocks, nil
}

// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlock) error {
	var l2Blocks []L2Block
	for _, block := range blocks {
		header, err := json.Marshal(block.Header)
		if err != nil {
			log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
			return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
		}

		txs, err := json.Marshal(block.Transactions)
		if err != nil {
			log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
			return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
		}

		l2Block := L2Block{
			Number:           block.Header.Number.Uint64(),
			Hash:             block.Header.Hash().String(),
			ParentHash:       block.Header.ParentHash.String(),
			Transactions:     string(txs),
			WithdrawTrieRoot: block.WithdrawTrieRoot.Hex(),
			TxNum:            uint32(len(block.Transactions)),
			GasUsed:          block.Header.GasUsed,
			BlockTimestamp:   block.Header.Time,
			Header:           string(header),
		}
		l2Blocks = append(l2Blocks, l2Block)
	}

	db := o.db.WithContext(ctx)
	db = db.Model(&L2Block{})

	if err := db.Create(&l2Blocks).Error; err != nil {
		return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
	}
	return nil
}

// UpdateChunkHashInRange updates the chunk_hash of blocks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
// This function ensures the number of updated rows equals (endIndex - startIndex + 1);
// if the rows affected do not match this expectation, an error is returned.
func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, chunkHash string, dbTX ...*gorm.DB) error {
	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&L2Block{})
	db = db.Where("number >= ? AND number <= ?", startIndex, endIndex)

	tx := db.Update("chunk_hash", chunkHash)
	if tx.Error != nil {
		return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start index: %v, end index: %v, chunk hash: %v", tx.Error, startIndex, endIndex, chunkHash)
	}

	// sanity check
	if uint64(tx.RowsAffected) != endIndex-startIndex+1 {
		return fmt.Errorf("L2Block.UpdateChunkHashInRange: incorrect number of rows affected, expected: %v, got: %v", endIndex-startIndex+1, tx.RowsAffected)
	}

	return nil
}
127
bridge/internal/orm/l2_message.go
Normal file
@@ -0,0 +1,127 @@
package orm

import (
	"context"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"
)

// L2Message is the structure of a stored layer2 bridge message
type L2Message struct {
	db *gorm.DB `gorm:"column:-"`

	Nonce      uint64 `json:"nonce" gorm:"column:nonce"`
	MsgHash    string `json:"msg_hash" gorm:"column:msg_hash"`
	Height     uint64 `json:"height" gorm:"column:height"`
	Sender     string `json:"sender" gorm:"column:sender"`
	Value      string `json:"value" gorm:"column:value"`
	Target     string `json:"target" gorm:"column:target"`
	Calldata   string `json:"calldata" gorm:"column:calldata"`
	Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash"`
	Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:NULL"`
	Proof      string `json:"proof" gorm:"column:proof;default:NULL"`
	Status     int    `json:"status" gorm:"column:status;default:1"`
}

// NewL2Message creates an L2Message instance
func NewL2Message(db *gorm.DB) *L2Message {
	return &L2Message{db: db}
}

// TableName defines the L2Message table name
func (*L2Message) TableName() string {
	return "l2_message"
}

// GetL2Messages fetches a list of messages matching the given fields
func (m *L2Message) GetL2Messages(fields map[string]interface{}, orderByList []string, limit int) ([]L2Message, error) {
	var l2MsgList []L2Message
	db := m.db
	for key, value := range fields {
		db = db.Where(key, value)
	}

	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}

	if limit != 0 {
		db = db.Limit(limit)
	}

	if err := db.Find(&l2MsgList).Error; err != nil {
		return nil, err
	}
	return l2MsgList, nil
}

// GetLayer2LatestWatchedHeight returns the latest height stored in the table
func (m *L2Message) GetLayer2LatestWatchedHeight() (int64, error) {
	// @note This is not exact, since we may not have messages in some blocks.
	// But it is only called at startup, so some redundancy is acceptable.
	result := m.db.Model(&L2Message{}).Select("COALESCE(MAX(height), -1)").Row()
	if result.Err() != nil {
		return -1, result.Err()
	}

	var maxNumber int64
	if err := result.Scan(&maxNumber); err != nil {
		return -1, err
	}
	return maxNumber, nil
}

// GetL2MessageByNonce fetches a message by nonce.
// Only used in unit tests.
func (m *L2Message) GetL2MessageByNonce(nonce uint64) (*L2Message, error) {
	var msg L2Message
	err := m.db.Where("nonce", nonce).First(&msg).Error
	if err != nil {
		return nil, err
	}
	return &msg, nil
}

// SaveL2Messages batch saves a list of layer2 messages
func (m *L2Message) SaveL2Messages(ctx context.Context, messages []L2Message) error {
	if len(messages) == 0 {
		return nil
	}

	err := m.db.WithContext(ctx).Create(&messages).Error
	if err != nil {
		nonces := make([]uint64, 0, len(messages))
		heights := make([]uint64, 0, len(messages))
		for _, msg := range messages {
			nonces = append(nonces, msg.Nonce)
			heights = append(heights, msg.Height)
		}
		log.Error("failed to insert layer2Messages", "nonces", nonces, "heights", heights, "err", err)
	}
	return err
}

// UpdateLayer2Status updates the message status, given the message hash
func (m *L2Message) UpdateLayer2Status(ctx context.Context, msgHash string, status types.MsgStatus) error {
	err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Update("status", int(status)).Error
	if err != nil {
		return err
	}
	return nil
}

// UpdateLayer2StatusAndLayer1Hash updates the message status and layer1 transaction hash, given the message hash
func (m *L2Message) UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status types.MsgStatus, layer1Hash string) error {
	updateFields := map[string]interface{}{
		"status":      int(status),
		"layer1_hash": layer1Hash,
	}
	err := m.db.Model(&L2Message{}).WithContext(ctx).Where("msg_hash", msgHash).Updates(updateFields).Error
	if err != nil {
		return err
	}
	return nil
}
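A hedged sketch of a relay loop built on these helpers; the status constants and the relayToL1 call are assumptions for illustration:

	l2MessageOrm := orm.NewL2Message(db)
	// Oldest pending withdrawals first.
	msgs, err := l2MessageOrm.GetL2Messages(
		map[string]interface{}{"status": int(types.MsgPending)}, // assumed status constant
		[]string{"nonce ASC"},
		100,
	)
	if err != nil {
		return err
	}
	for _, msg := range msgs {
		layer1Hash, err := relayToL1(msg) // hypothetical L1 submission
		if err != nil {
			return err
		}
		if err := l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(ctx, msg.MsgHash, types.MsgSubmitted, layer1Hash); err != nil {
			return err
		}
	}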
61
bridge/internal/orm/migrate/migrate.go
Normal file
@@ -0,0 +1,61 @@
package migrate

import (
	"database/sql"
	"embed"
	"os"
	"strconv"

	"github.com/pressly/goose/v3"
)

//go:embed migrations/*.sql
var embedMigrations embed.FS

// MigrationsDir is the migrations directory
const MigrationsDir string = "migrations"

func init() {
	goose.SetBaseFS(embedMigrations)
	goose.SetSequential(true)
	goose.SetTableName("scroll_migrations")

	verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
	goose.SetVerbose(verbose)
}

// Migrate migrates the db
func Migrate(db *sql.DB) error {
	return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}

// Rollback rolls back to the given version
func Rollback(db *sql.DB, version *int64) error {
	if version != nil {
		return goose.DownTo(db, MigrationsDir, *version)
	}
	return goose.Down(db, MigrationsDir)
}

// ResetDB cleans and re-migrates the db.
func ResetDB(db *sql.DB) error {
	if err := Rollback(db, new(int64)); err != nil {
		return err
	}
	return Migrate(db)
}

// Current gets the current migration version
func Current(db *sql.DB) (int64, error) {
	return goose.GetDBVersion(db)
}

// Status reports the migration status
func Status(db *sql.DB) error {
	return goose.Version(db, MigrationsDir)
}

// Create creates a new migration file
func Create(db *sql.DB, name, migrationType string) error {
	return goose.Create(db, MigrationsDir, name, migrationType)
}
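A hedged sketch of wiring these helpers into service startup; the postgres DSN is a placeholder and the driver import follows the lib/pq usage in the test below:

	sqlDB, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/scroll?sslmode=disable") // hypothetical DSN
	if err != nil {
		log.Crit("failed to open db", "err", err)
	}
	// Apply any pending migrations embedded from migrations/*.sql.
	if err := migrate.Migrate(sqlDB); err != nil {
		log.Crit("failed to migrate db", "err", err)
	}
	version, err := migrate.Current(sqlDB)
	if err != nil {
		log.Crit("failed to get db version", "err", err)
	}
	log.Info("db schema migrated", "version", version)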
86
bridge/internal/orm/migrate/migrate_test.go
Normal file
@@ -0,0 +1,86 @@
package migrate

import (
	"testing"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"

	"scroll-tech/database"
)

var (
	base *docker.App
	pgDB *sqlx.DB
)

func initEnv(t *testing.T) error {
	// Start db container.
	base.RunDBImage(t)

	// Create db orm handler.
	factory, err := database.NewOrmFactory(base.DBConfig)
	if err != nil {
		return err
	}
	pgDB = factory.GetDB()
	return nil
}

func TestMigrate(t *testing.T) {
	base = docker.NewDockerApp()
	if err := initEnv(t); err != nil {
		t.Fatal(err)
	}

	t.Run("testCurrent", testCurrent)
	t.Run("testStatus", testStatus)
	t.Run("testResetDB", testResetDB)
	t.Run("testMigrate", testMigrate)
	t.Run("testRollback", testRollback)

	t.Cleanup(func() {
		base.Free()
	})
}

func testCurrent(t *testing.T) {
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, 0, int(cur))
}

func testStatus(t *testing.T) {
	status := Status(pgDB.DB)
	assert.NoError(t, status)
}

func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	// total number of migrations.
	assert.Equal(t, 5, int(cur))
}

func testMigrate(t *testing.T) {
	assert.NoError(t, Migrate(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, cur > 0)
}

func testRollback(t *testing.T) {
	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, version > 0)

	assert.NoError(t, Rollback(pgDB.DB, nil))

	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, cur+1 == version)
}
38
bridge/internal/orm/migrate/migrations/00001_block_trace.sql
Normal file
@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin

-- TODO: use foreign key for batch_id?
-- TODO: why tx_num is bigint?
create table block_trace
(
    number          BIGINT  NOT NULL,
    hash            VARCHAR NOT NULL,
    parent_hash     VARCHAR NOT NULL,
    trace           JSON    NOT NULL,
    batch_hash      VARCHAR DEFAULT NULL,
    tx_num          INTEGER NOT NULL,
    gas_used        BIGINT  NOT NULL,
    block_timestamp NUMERIC NOT NULL
);

create unique index block_trace_hash_uindex
    on block_trace (hash);

create unique index block_trace_number_uindex
    on block_trace (number);

create unique index block_trace_parent_uindex
    on block_trace (number, parent_hash);

create unique index block_trace_parent_hash_uindex
    on block_trace (hash, parent_hash);

create index block_trace_batch_hash_index
    on block_trace (batch_hash);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists block_trace;
-- +goose StatementEnd
50
bridge/internal/orm/migrate/migrations/00002_l1_message.sql
Normal file
@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
create table l1_message
(
    queue_index  BIGINT  NOT NULL,
    msg_hash     VARCHAR NOT NULL,
    height       BIGINT  NOT NULL,
    gas_limit    BIGINT  NOT NULL,
    sender       VARCHAR NOT NULL,
    target       VARCHAR NOT NULL,
    value        VARCHAR NOT NULL,
    calldata     TEXT    NOT NULL,
    layer1_hash  VARCHAR NOT NULL,
    layer2_hash  VARCHAR DEFAULT NULL,
    status       INTEGER DEFAULT 1,
    created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l1_message_hash_uindex
    on l1_message (msg_hash);

create unique index l1_message_nonce_uindex
    on l1_message (queue_index);

create index l1_message_height_index
    on l1_message (height);

CREATE OR REPLACE FUNCTION update_timestamp()
    RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_time = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
    ON l1_message FOR EACH ROW EXECUTE PROCEDURE
    update_timestamp();


-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd
50
bridge/internal/orm/migrate/migrations/00003_l2_message.sql
Normal file
@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
create table l2_message
(
    nonce        BIGINT  NOT NULL,
    msg_hash     VARCHAR NOT NULL,
    height       BIGINT  NOT NULL,
    sender       VARCHAR NOT NULL,
    target       VARCHAR NOT NULL,
    value        VARCHAR NOT NULL,
    calldata     TEXT    NOT NULL,
    layer2_hash  VARCHAR NOT NULL,
    layer1_hash  VARCHAR DEFAULT NULL,
    proof        TEXT    DEFAULT NULL,
    status       INTEGER DEFAULT 1,
    created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';

create unique index l2_message_hash_uindex
    on l2_message (msg_hash);

create unique index l2_message_nonce_uindex
    on l2_message (nonce);

create index l2_message_height_index
    on l2_message (height);

CREATE OR REPLACE FUNCTION update_timestamp()
    RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_time = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
    ON l2_message FOR EACH ROW EXECUTE PROCEDURE
    update_timestamp();


-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd
50
bridge/internal/orm/migrate/migrations/00004_block_batch.sql
Normal file
@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin

create table block_batch
(
    hash                 VARCHAR NOT NULL,
    index                BIGINT  NOT NULL,
    start_block_number   BIGINT  NOT NULL,
    start_block_hash     VARCHAR NOT NULL,
    end_block_number     BIGINT  NOT NULL,
    end_block_hash       VARCHAR NOT NULL,
    parent_hash          VARCHAR NOT NULL,
    state_root           VARCHAR NOT NULL,
    total_tx_num         BIGINT  NOT NULL,
    total_l1_tx_num      BIGINT  NOT NULL,
    total_l2_gas         BIGINT  NOT NULL,
    proving_status       INTEGER DEFAULT 1,
    proof                BYTEA   DEFAULT NULL,
    instance_commitments BYTEA   DEFAULT NULL,
    proof_time_sec       INTEGER DEFAULT 0,
    rollup_status        INTEGER DEFAULT 1,
    commit_tx_hash       VARCHAR DEFAULT NULL,
    finalize_tx_hash     VARCHAR DEFAULT NULL,
    oracle_status        INTEGER DEFAULT 1,
    oracle_tx_hash       VARCHAR DEFAULT NULL,
    created_at           TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    prover_assigned_at   TIMESTAMP(0) DEFAULT NULL,
    proved_at            TIMESTAMP(0) DEFAULT NULL,
    committed_at         TIMESTAMP(0) DEFAULT NULL,
    finalized_at         TIMESTAMP(0) DEFAULT NULL
);

comment
on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
comment
on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';
comment
on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';

create unique index block_batch_hash_uindex
    on block_batch (hash);
create unique index block_batch_index_uindex
    on block_batch (index);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists block_batch;
-- +goose StatementEnd
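The integer status columns above are mirrored by Go enumerations in scroll-tech/common/types. A sketch of how proving_status could map to iota constants, derived from the column comment alone (ProvingTaskAssigned and ProvingTaskVerified appear elsewhere in this diff; the other names and exact values are assumptions):

    // ProvingStatus mirrors block_batch.proving_status; the zero value is
    // "undefined" so that the column default of 1 maps to "unassigned".
    type ProvingStatus int

    const (
        ProvingStatusUndefined ProvingStatus = iota // 0: undefined
        ProvingTaskUnassigned                       // 1: unassigned (column default)
        ProvingTaskSkipped                          // 2: skipped
        ProvingTaskAssigned                         // 3: assigned
        ProvingTaskProved                           // 4: proved
        ProvingTaskVerified                         // 5: verified
        ProvingTaskFailed                           // 6: failed
    )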
@@ -3,21 +3,19 @@

create table l1_block
(
    -- block
    number     BIGINT  NOT NULL,
    hash       VARCHAR NOT NULL,
    header_rlp TEXT    NOT NULL,
    base_fee   BIGINT  NOT NULL,

    -- oracle
    oracle_status  SMALLINT NOT NULL DEFAULT 1,
    oracle_tx_hash VARCHAR DEFAULT NULL,

    -- metadata
    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP(0) DEFAULT NULL
    block_status   INTEGER DEFAULT 1,
    import_tx_hash VARCHAR DEFAULT NULL,
    oracle_status  INTEGER DEFAULT 1,
    oracle_tx_hash VARCHAR DEFAULT NULL
);

comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';

comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';

@@ -32,4 +30,4 @@ on l1_block (number);
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd
-- +goose StatementEnd
@@ -1,240 +0,0 @@
package orm

import (
    "context"
    "encoding/json"
    "os"
    "testing"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/stretchr/testify/assert"
    "gorm.io/gorm"

    "scroll-tech/common/database"
    "scroll-tech/common/docker"
    "scroll-tech/common/types"

    "scroll-tech/database/migrate"
)

var (
    base *docker.App

    db         *gorm.DB
    l2BlockOrm *L2Block
    chunkOrm   *Chunk
    batchOrm   *Batch

    wrappedBlock1 *types.WrappedBlock
    wrappedBlock2 *types.WrappedBlock
    chunk1        *types.Chunk
    chunk2        *types.Chunk
    chunkHash1    common.Hash
    chunkHash2    common.Hash
)

func TestMain(m *testing.M) {
    t := &testing.T{}
    setupEnv(t)
    defer tearDownEnv(t)
    m.Run()
}

func setupEnv(t *testing.T) {
    base = docker.NewDockerApp()
    base.RunDBImage(t)
    var err error
    db, err = database.InitDB(
        &database.Config{
            DSN:        base.DBConfig.DSN,
            DriverName: base.DBConfig.DriverName,
            MaxOpenNum: base.DBConfig.MaxOpenNum,
            MaxIdleNum: base.DBConfig.MaxIdleNum,
        },
    )
    assert.NoError(t, err)
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))

    batchOrm = NewBatch(db)
    chunkOrm = NewChunk(db)
    l2BlockOrm = NewL2Block(db)

    templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
    assert.NoError(t, err)
    wrappedBlock1 = &types.WrappedBlock{}
    err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
    assert.NoError(t, err)

    templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
    assert.NoError(t, err)
    wrappedBlock2 = &types.WrappedBlock{}
    err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
    assert.NoError(t, err)

    chunk1 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1}}
    chunkHash1, err = chunk1.Hash(0)
    assert.NoError(t, err)

    chunk2 = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock2}}
    chunkHash2, err = chunk2.Hash(chunk1.NumL1Messages(0))
    assert.NoError(t, err)
}

func tearDownEnv(t *testing.T) {
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    sqlDB.Close()
    base.Free()
}

func TestL2BlockOrm(t *testing.T) {
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))

    err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
    assert.NoError(t, err)

    height, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
    assert.NoError(t, err)
    assert.Equal(t, uint64(3), height)

    blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
    assert.NoError(t, err)
    assert.Len(t, blocks, 2)
    assert.Equal(t, wrappedBlock1, blocks[0])
    assert.Equal(t, wrappedBlock2, blocks[1])

    blocks, err = l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3)
    assert.NoError(t, err)
    assert.Len(t, blocks, 2)
    assert.Equal(t, wrappedBlock1, blocks[0])
    assert.Equal(t, wrappedBlock2, blocks[1])

    err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 2, 2, "test hash")
    assert.NoError(t, err)

    blocks, err = l2BlockOrm.GetUnchunkedBlocks(context.Background())
    assert.NoError(t, err)
    assert.Len(t, blocks, 1)
    assert.Equal(t, wrappedBlock2, blocks[0])
}

func TestChunkOrm(t *testing.T) {
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))

    dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
    assert.NoError(t, err)
    assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())

    dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
    assert.NoError(t, err)
    assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())

    chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
    assert.NoError(t, err)
    assert.Len(t, chunks, 2)
    assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
    assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)

    err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
    assert.NoError(t, err)
    err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned)
    assert.NoError(t, err)

    chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1)
    assert.NoError(t, err)
    assert.Len(t, chunks, 2)
    assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
    assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
    assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus))
    assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus))

    err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
    assert.NoError(t, err)
    chunks, err = chunkOrm.GetUnbatchedChunks(context.Background())
    assert.NoError(t, err)
    assert.Len(t, chunks, 1)
}

func TestBatchOrm(t *testing.T) {
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))

    batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
    assert.NoError(t, err)
    hash1 := batch1.Hash

    batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
    assert.NoError(t, err)
    batchHeader1, err := types.DecodeBatchHeader(batch1.BatchHeader)
    assert.NoError(t, err)
    batchHash1 := batchHeader1.Hash().Hex()
    assert.Equal(t, hash1, batchHash1)

    batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
    assert.NoError(t, err)
    hash2 := batch2.Hash

    batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
    assert.NoError(t, err)
    batchHeader2, err := types.DecodeBatchHeader(batch2.BatchHeader)
    assert.NoError(t, err)
    batchHash2 := batchHeader2.Hash().Hex()
    assert.Equal(t, hash2, batchHash2)

    count, err := batchOrm.GetBatchCount(context.Background())
    assert.NoError(t, err)
    assert.Equal(t, uint64(2), count)

    pendingBatches, err := batchOrm.GetPendingBatches(context.Background(), 100)
    assert.NoError(t, err)
    assert.Equal(t, 2, len(pendingBatches))

    rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
    assert.NoError(t, err)
    assert.Equal(t, 2, len(rollupStatus))
    assert.Equal(t, types.RollupPending, rollupStatus[0])
    assert.Equal(t, types.RollupPending, rollupStatus[1])

    err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
    assert.NoError(t, err)

    dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
    assert.Error(t, err)
    assert.Nil(t, dbProof)

    err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
    assert.NoError(t, err)
    err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
    assert.NoError(t, err)
    err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
    assert.NoError(t, err)

    updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
    assert.NoError(t, err)
    assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
    assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
    assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
    assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)

    err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted)
    assert.NoError(t, err)
    updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
    assert.NoError(t, err)
    assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
    assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))

    err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
    assert.NoError(t, err)

    updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
    assert.NoError(t, err)
    assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
    assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
}
236
bridge/internal/types/batch.go
Normal file
@@ -0,0 +1,236 @@
package types

import (
    "bufio"
    "bytes"
    "encoding/binary"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"

    abi "scroll-tech/bridge/abi"
)

// PublicInputHashConfig is the configuration of how to compute the public input hash.
type PublicInputHashConfig struct {
    MaxTxNum      int         `json:"max_tx_num"`
    PaddingTxHash common.Hash `json:"padding_tx_hash"`
}

const defaultMaxTxNum = 44

var defaultPaddingTxHash = [32]byte{}

// BatchData contains info of batch to be committed.
type BatchData struct {
    Batch        abi.IScrollChainBatch
    TxHashes     []common.Hash
    TotalTxNum   uint64
    TotalL1TxNum uint64
    TotalL2Gas   uint64

    // cache for the BatchHash
    hash *common.Hash
    // The config to compute the public input hash, or the block hash.
    // If it is nil, the hash calculation will use `defaultMaxTxNum` and `defaultPaddingTxHash`.
    piCfg *PublicInputHashConfig
}

// Timestamp returns the timestamp of the first block in the batch.
func (b *BatchData) Timestamp() uint64 {
    if len(b.Batch.Blocks) == 0 {
        return 0
    }
    return b.Batch.Blocks[0].Timestamp
}

// Hash calculates the hash of this batch.
func (b *BatchData) Hash() *common.Hash {
    if b.hash != nil {
        return b.hash
    }

    buf := make([]byte, 8)
    hasher := crypto.NewKeccakState()

    // 1. hash PrevStateRoot, NewStateRoot, WithdrawTrieRoot
    // @todo: panic on error here.
    _, _ = hasher.Write(b.Batch.PrevStateRoot[:])
    _, _ = hasher.Write(b.Batch.NewStateRoot[:])
    _, _ = hasher.Write(b.Batch.WithdrawTrieRoot[:])

    // 2. hash all block contexts
    for _, block := range b.Batch.Blocks {
        // write BlockHash & ParentHash
        _, _ = hasher.Write(block.BlockHash[:])
        _, _ = hasher.Write(block.ParentHash[:])
        // write BlockNumber
        binary.BigEndian.PutUint64(buf, block.BlockNumber)
        _, _ = hasher.Write(buf)
        // write Timestamp
        binary.BigEndian.PutUint64(buf, block.Timestamp)
        _, _ = hasher.Write(buf)
        // write BaseFee
        var baseFee [32]byte
        if block.BaseFee != nil {
            baseFee = newByte32FromBytes(block.BaseFee.Bytes())
        }
        _, _ = hasher.Write(baseFee[:])
        // write GasLimit
        binary.BigEndian.PutUint64(buf, block.GasLimit)
        _, _ = hasher.Write(buf)
        // write NumTransactions
        binary.BigEndian.PutUint16(buf[:2], block.NumTransactions)
        _, _ = hasher.Write(buf[:2])
        // write NumL1Messages
        binary.BigEndian.PutUint16(buf[:2], block.NumL1Messages)
        _, _ = hasher.Write(buf[:2])
    }

    // 3. add all tx hashes
    for _, txHash := range b.TxHashes {
        _, _ = hasher.Write(txHash[:])
    }

    // 4. append empty tx hash up to MaxTxNum
    maxTxNum := defaultMaxTxNum
    paddingTxHash := common.Hash(defaultPaddingTxHash)
    if b.piCfg != nil {
        maxTxNum = b.piCfg.MaxTxNum
        paddingTxHash = b.piCfg.PaddingTxHash
    }
    for i := len(b.TxHashes); i < maxTxNum; i++ {
        _, _ = hasher.Write(paddingTxHash[:])
    }

    b.hash = new(common.Hash)
    _, _ = hasher.Read(b.hash[:])

    return b.hash
}

// NewBatchData creates a BatchData given the parent batch information and the traces of the blocks
// included in this batch.
func NewBatchData(parentBatch *BatchInfo, blocks []*WrappedBlock, piCfg *PublicInputHashConfig) *BatchData {
    batchData := new(BatchData)
    batch := &batchData.Batch

    // set BatchIndex, ParentBatchHash
    batch.BatchIndex = parentBatch.Index + 1
    batch.ParentBatchHash = common.HexToHash(parentBatch.Hash)
    batch.Blocks = make([]abi.IScrollChainBlockContext, len(blocks))

    var batchTxDataBuf bytes.Buffer
    batchTxDataWriter := bufio.NewWriter(&batchTxDataBuf)

    for i, block := range blocks {
        batchData.TotalTxNum += uint64(len(block.Transactions))
        batchData.TotalL2Gas += block.Header.GasUsed

        // set baseFee to 0 when it's nil in the block header
        baseFee := block.Header.BaseFee
        if baseFee == nil {
            baseFee = big.NewInt(0)
        }

        batch.Blocks[i] = abi.IScrollChainBlockContext{
            BlockHash:       block.Header.Hash(),
            ParentHash:      block.Header.ParentHash,
            BlockNumber:     block.Header.Number.Uint64(),
            Timestamp:       block.Header.Time,
            BaseFee:         baseFee,
            GasLimit:        block.Header.GasLimit,
            NumTransactions: uint16(len(block.Transactions)),
            NumL1Messages:   0, // TODO: currently use 0, will re-enable after we use l2geth to include L1 messages
        }

        // fill in RLP-encoded transactions
        for _, txData := range block.Transactions {
            data, _ := hexutil.Decode(txData.Data)
            // right now we only support legacy tx
            tx := types.NewTx(&types.LegacyTx{
                Nonce:    txData.Nonce,
                To:       txData.To,
                Value:    txData.Value.ToInt(),
                Gas:      txData.Gas,
                GasPrice: txData.GasPrice.ToInt(),
                Data:     data,
                V:        txData.V.ToInt(),
                R:        txData.R.ToInt(),
                S:        txData.S.ToInt(),
            })
            rlpTxData, _ := tx.MarshalBinary()
            var txLen [4]byte
            binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
            _, _ = batchTxDataWriter.Write(txLen[:])
            _, _ = batchTxDataWriter.Write(rlpTxData)
            batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
        }

        if i == 0 {
            batch.PrevStateRoot = common.HexToHash(parentBatch.StateRoot)
        }

        // set NewStateRoot & WithdrawTrieRoot from the last block
        if i == len(blocks)-1 {
            batch.NewStateRoot = block.Header.Root
            batch.WithdrawTrieRoot = block.WithdrawTrieRoot
        }
    }

    if err := batchTxDataWriter.Flush(); err != nil {
        panic("Buffered I/O flush failed")
    }

    batch.L2Transactions = batchTxDataBuf.Bytes()
    batchData.piCfg = piCfg

    return batchData
}

// NewGenesisBatchData generates the batch that contains the genesis block.
func NewGenesisBatchData(genesisBlockTrace *WrappedBlock) *BatchData {
    header := genesisBlockTrace.Header
    if header.Number.Uint64() != 0 {
        panic("invalid genesis block trace: block number is not 0")
    }

    batchData := new(BatchData)
    batch := &batchData.Batch

    // fill in batch information
    batch.BatchIndex = 0
    batch.Blocks = make([]abi.IScrollChainBlockContext, 1)
    batch.NewStateRoot = header.Root
    // PrevStateRoot, WithdrawTrieRoot, ParentBatchHash should all be 0
    // L2Transactions should be empty

    // fill in block context
    batch.Blocks[0] = abi.IScrollChainBlockContext{
        BlockHash:       header.Hash(),
        ParentHash:      header.ParentHash,
        BlockNumber:     header.Number.Uint64(),
        Timestamp:       header.Time,
        BaseFee:         header.BaseFee,
        GasLimit:        header.GasLimit,
        NumTransactions: 0,
        NumL1Messages:   0,
    }

    return batchData
}

// newByte32FromBytes converts the bytes in big-endian encoding to 32 bytes in big-endian encoding
func newByte32FromBytes(b []byte) [32]byte {
    var byte32 [32]byte

    if len(b) > 32 {
        b = b[len(b)-32:]
    }

    copy(byte32[32-len(b):], b)
    return byte32
}
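A quick illustration of the padding and truncation semantics of newByte32FromBytes above (the input values are arbitrary):

    short := newByte32FromBytes([]byte{0x12, 0x34})
    // short[30] == 0x12, short[31] == 0x34: short inputs are left-padded with
    // zeros, landing at the big-endian tail, which is how BaseFee enters Hash().

    long := newByte32FromBytes(make([]byte, 40))
    // Inputs longer than 32 bytes keep only their last 32 bytes,
    // i.e. the value is truncated modulo 2^256.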
90
bridge/internal/types/batch_test.go
Normal file
@@ -0,0 +1,90 @@
package types

import (
    "math/big"
    "testing"

    "github.com/scroll-tech/go-ethereum/common"
    gethTypes "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"

    abi "scroll-tech/bridge/abi"
)

func TestBatchHash(t *testing.T) {
    txBytes := common.Hex2Bytes("02f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae")
    tx := new(gethTypes.Transaction)
    if err := tx.UnmarshalBinary(txBytes); err != nil {
        t.Fatalf("invalid tx hex string: %s", err)
    }

    batchData := new(BatchData)
    batchData.TxHashes = append(batchData.TxHashes, tx.Hash())
    batchData.piCfg = &PublicInputHashConfig{
        MaxTxNum:      4,
        PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
    }

    batch := &batchData.Batch
    batch.PrevStateRoot = common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000cafe")

    block := abi.IScrollChainBlockContext{
        BlockNumber:     51966,
        Timestamp:       123456789,
        BaseFee:         new(big.Int).SetUint64(0),
        GasLimit:        10000000000000000,
        NumTransactions: 1,
        NumL1Messages:   0,
    }
    batch.Blocks = append(batch.Blocks, block)

    hash := batchData.Hash()
    assert.Equal(t, *hash, common.HexToHash("0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805"))

    // use a different tx hash
    txBytes = common.Hex2Bytes("f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc")
    tx = new(gethTypes.Transaction)
    if err := tx.UnmarshalBinary(txBytes); err != nil {
        t.Fatalf("invalid tx hex string: %s", err)
    }
    batchData.TxHashes[0] = tx.Hash()

    batchData.hash = nil // clear the cache
    assert.Equal(t, *batchData.Hash(), common.HexToHash("0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b"))
}

func TestNewGenesisBatch(t *testing.T) {
    genesisBlock := &gethTypes.Header{
        UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
        Root:        common.HexToHash("0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5"),
        TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
        ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
        Difficulty:  big.NewInt(1),
        Number:      big.NewInt(0),
        GasLimit:    940000000,
        GasUsed:     0,
        Time:        1639724192,
        Extra:       common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000214f8d488aa9ebf83e30bad45fb8f9c8ee2509f5511caff794753d07e9dfb218cfc233bb62d2c57022783094e1a7edb6f069f8424bb68496a0926b130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
        BaseFee:     big.NewInt(1000000000),
    }
    assert.Equal(
        t,
        genesisBlock.Hash().Hex(),
        "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
        "wrong genesis block header",
    )

    blockTrace := &WrappedBlock{genesisBlock, nil, common.Hash{}}
    batchData := NewGenesisBatchData(blockTrace)
    t.Log(batchData.Batch.Blocks[0])
    batchData.piCfg = &PublicInputHashConfig{
        MaxTxNum:      25,
        PaddingTxHash: common.HexToHash("0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6"),
    }
    assert.Equal(
        t,
        batchData.Hash().Hex(),
        "0x65cf210e30f75cf8fd198df124255f73bc08d6324759e828a784fa938e7ac43d",
        "wrong genesis batch hash",
    )
}
43
bridge/internal/utils/db.go
Normal file
@@ -0,0 +1,43 @@
package utils

import (
    "gorm.io/driver/postgres"
    "gorm.io/gorm"
    "gorm.io/gorm/logger"

    "scroll-tech/bridge/internal/config"
)

// InitDB initializes the db handler.
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
    db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
        Logger: logger.Default.LogMode(logger.Info),
    })
    if err != nil {
        return nil, err
    }
    sqlDB, err := db.DB()
    if err != nil {
        return nil, err
    }

    sqlDB.SetMaxOpenConns(config.MaxOpenNum)
    sqlDB.SetMaxIdleConns(config.MaxIdleNum)

    if err = sqlDB.Ping(); err != nil {
        return nil, err
    }
    return db, nil
}

// CloseDB closes the db handler. Note that the handler should only be closed when the program exits.
func CloseDB(db *gorm.DB) error {
    sqlDB, err := db.DB()
    if err != nil {
        return err
    }
    if err := sqlDB.Close(); err != nil {
        return err
    }
    return nil
}
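A minimal sketch of wiring these helpers into service startup (the DSN and pool sizes are hypothetical; the DBConfig field names match the setupDB change later in this diff):

    cfg := &config.DBConfig{
        DSN:        "postgres://user:pass@localhost:5432/scroll?sslmode=disable", // hypothetical
        DriverName: "postgres",
        MaxOpenNum: 200,
        MaxIdleNum: 20,
    }
    db, err := utils.InitDB(cfg)
    if err != nil {
        log.Crit("failed to init db", "err", err)
    }
    // Keep the handler open for the whole process lifetime; close only on exit.
    defer utils.CloseDB(db)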
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;

import {BatchHeaderV0Codec} from "../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
import {ChunkCodec} from "../../contracts/src/libraries/codec/ChunkCodec.sol";
import {IL1MessageQueue} from "../../contracts/src/L1/rollup/IL1MessageQueue.sol";

contract MockBridgeL1 {
    /******************************
     * Events from L1MessageQueue *
@@ -21,7 +17,7 @@ contract MockBridgeL1 {
        address indexed sender,
        address indexed target,
        uint256 value,
        uint64 queueIndex,
        uint256 queueIndex,
        uint256 gasLimit,
        bytes data
    );
@@ -50,27 +46,74 @@ contract MockBridgeL1 {
    /// @param messageHash The hash of the message.
    event RelayedMessage(bytes32 indexed messageHash);

    /// @dev The maximum number of transactions in one batch.
    uint256 public immutable maxNumTxInBatch;

    /// @dev The hash used for padding public inputs.
    bytes32 public immutable paddingTxHash;

    /***************************
     * Events from ScrollChain *
     ***************************/

    /// @notice Emitted when a new batch is committed.
    /// @param batchHash The hash of the batch.
    /// @notice Emitted when a new batch is committed.
    /// @param batchHash The hash of the batch
    event CommitBatch(bytes32 indexed batchHash);

    /// @notice Emitted when a batch is reverted.
    /// @param batchHash The identification of the batch.
    event RevertBatch(bytes32 indexed batchHash);

    /// @notice Emitted when a batch is finalized.
    /// @param batchHash The hash of the batch
    /// @param stateRoot The state root in layer 2 after this batch.
    /// @param withdrawRoot The merkle root in layer2 after this batch.
    event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
    event FinalizeBatch(bytes32 indexed batchHash);

    /***********
     * Structs *
     ***********/

    struct BlockContext {
        // The hash of this block.
        bytes32 blockHash;
        // The parent hash of this block.
        bytes32 parentHash;
        // The height of this block.
        uint64 blockNumber;
        // The timestamp of this block.
        uint64 timestamp;
        // The base fee of this block.
        // Currently, it is not used, because we disable EIP-1559.
        // We keep it for future-proofing.
        uint256 baseFee;
        // The gas limit of this block.
        uint64 gasLimit;
        // The number of transactions in this block, both L1 & L2 txs.
        uint16 numTransactions;
        // The number of l1 messages in this block.
        uint16 numL1Messages;
    }

    struct Batch {
        // The list of blocks in this batch
        BlockContext[] blocks; // MAX_NUM_BLOCKS = 100, about 5 min
        // The state root of previous batch.
        // The first batch will use 0x0 for prevStateRoot
        bytes32 prevStateRoot;
        // The state root of the last block in this batch.
        bytes32 newStateRoot;
        // The withdraw trie root of the last block in this batch.
        bytes32 withdrawTrieRoot;
        // The index of the batch.
        uint64 batchIndex;
        // The parent batch hash.
        bytes32 parentBatchHash;
        // Concatenated raw data of RLP encoded L2 txs
        bytes l2Transactions;
    }

    struct L2MessageProof {
        // The index of the batch where the message belongs to.
        uint256 batchIndex;
        // The hash of the batch where the message belongs to.
        bytes32 batchHash;
        // Concatenation of merkle proof for withdraw merkle trie.
        bytes merkleProof;
    }
@@ -82,7 +125,14 @@ contract MockBridgeL1 {
    /// @notice Message nonce, used to avoid relay attack.
    uint256 public messageNonce;

    mapping(uint256 => bytes32) public committedBatches;
    /***************
     * Constructor *
     ***************/

    constructor() {
        maxNumTxInBatch = 44;
        paddingTxHash = 0x0000000000000000000000000000000000000000000000000000000000000000;
    }

    /***********************************
     * Functions from L2GasPriceOracle *
@@ -104,7 +154,7 @@ contract MockBridgeL1 {
        bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, target, value, messageNonce, message);
        {
            address _sender = applyL1ToL2Alias(address(this));
            emit QueueTransaction(_sender, target, 0, uint64(messageNonce), gasLimit, _xDomainCalldata);
            emit QueueTransaction(_sender, target, 0, messageNonce, gasLimit, _xDomainCalldata);
        }

        emit SentMessage(msg.sender, target, value, messageNonce, gasLimit, message);
@@ -128,65 +178,37 @@ contract MockBridgeL1 {
     * Functions from ScrollChain *
     ******************************/

    function commitBatch(
        uint8 /*version*/,
        bytes calldata /*parentBatchHeader*/,
        bytes[] memory chunks,
        bytes calldata /*skippedL1MessageBitmap*/
    ) external {
        // check whether the batch is empty
        uint256 _chunksLength = chunks.length;
        require(_chunksLength > 0, "batch is empty");
    function commitBatch(Batch memory _batch) external {
        _commitBatch(_batch);
    }

        uint256 dataPtr;
        assembly {
            dataPtr := mload(0x40)
            mstore(0x40, add(dataPtr, mul(_chunksLength, 32)))
    function commitBatches(Batch[] memory _batches) external {
        for (uint256 i = 0; i < _batches.length; i++) {
            _commitBatch(_batches[i]);
        }
    }

        for (uint256 i = 0; i < _chunksLength; i++) {
            _commitChunk(dataPtr, chunks[i]);

            unchecked {
                dataPtr += 32;
            }
        }

        bytes32 _dataHash;
        assembly {
            let dataLen := mul(_chunksLength, 0x20)
            _dataHash := keccak256(sub(dataPtr, dataLen), dataLen)
        }

        bytes memory paddedData = new bytes(89);
        assembly {
            mstore(add(paddedData, 57), _dataHash)
        }

        uint256 batchPtr;
        assembly {
            batchPtr := add(paddedData, 32)
        }
        bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89);
        committedBatches[0] = _batchHash;
        emit CommitBatch(_batchHash);
    function revertBatch(bytes32 _batchHash) external {
        emit RevertBatch(_batchHash);
    }

    function finalizeBatchWithProof(
        bytes calldata /*batchHeader*/,
        bytes32 /*prevStateRoot*/,
        bytes32 postStateRoot,
        bytes32 withdrawRoot,
        bytes calldata /*aggrProof*/
        bytes32 _batchHash,
        uint256[] memory,
        uint256[] memory
    ) external {
        bytes32 _batchHash = committedBatches[0];
        emit FinalizeBatch(_batchHash, postStateRoot, withdrawRoot);
        emit FinalizeBatch(_batchHash);
    }

    /**********************
     * Internal Functions *
     **********************/

    function _commitBatch(Batch memory _batch) internal {
        bytes32 _batchHash = _computePublicInputHash(_batch);
        emit CommitBatch(_batchHash);
    }

    /// @dev Internal function to generate the correct cross domain calldata for a message.
    /// @param _sender Message sender address.
    /// @param _target Target contract address.
@@ -212,10 +234,6 @@ contract MockBridgeL1 {
        );
    }

    /// @notice Utility function that converts the address in the L1 that submitted a tx to
    /// the inbox to the msg.sender viewed in the L2
    /// @param l1Address the address in the L1 that triggered the tx to L2
    /// @return l2Address L2 address as viewed in msg.sender
    function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) {
        uint160 offset = uint160(0x1111000000000000000000000000000000001111);
        unchecked {
@@ -223,67 +241,140 @@ contract MockBridgeL1 {
        }
    }

    function _commitChunk(
        uint256 memPtr,
        bytes memory _chunk
    ) internal pure {
        uint256 chunkPtr;
        uint256 startDataPtr;
        uint256 dataPtr;
        uint256 blockPtr;

        assembly {
            dataPtr := mload(0x40)
            startDataPtr := dataPtr
            chunkPtr := add(_chunk, 0x20) // skip chunkLength
            blockPtr := add(chunkPtr, 1) // skip numBlocks
        }

        uint256 _numBlocks = ChunkCodec.validateChunkLength(chunkPtr, _chunk.length);

        // concatenate block contexts
        uint256 _totalTransactionsInChunk;
        for (uint256 i = 0; i < _numBlocks; i++) {
            dataPtr = ChunkCodec.copyBlockContext(chunkPtr, dataPtr, i);
            uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
            unchecked {
                _totalTransactionsInChunk += _numTransactionsInBlock;
                blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH;
    /// @dev Internal function to compute the public input hash.
    /// @param batch The batch to compute.
    function _computePublicInputHash(Batch memory batch)
        internal
        view
        returns (
            bytes32
        )
    {
        uint256 publicInputsPtr;
        // 1. append prevStateRoot, newStateRoot and withdrawTrieRoot to public inputs
        {
            bytes32 prevStateRoot = batch.prevStateRoot;
            bytes32 newStateRoot = batch.newStateRoot;
            bytes32 withdrawTrieRoot = batch.withdrawTrieRoot;
            // number of bytes in public inputs: 32 * 3 + 124 * blocks + 32 * MAX_NUM_TXS
            uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch;
            assembly {
                publicInputsPtr := mload(0x40)
                mstore(0x40, add(publicInputsPtr, publicInputsSize))
                mstore(publicInputsPtr, prevStateRoot)
                publicInputsPtr := add(publicInputsPtr, 0x20)
                mstore(publicInputsPtr, newStateRoot)
                publicInputsPtr := add(publicInputsPtr, 0x20)
                mstore(publicInputsPtr, withdrawTrieRoot)
                publicInputsPtr := add(publicInputsPtr, 0x20)
            }
        }

        assembly {
            mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
            blockPtr := add(chunkPtr, 1) // reset block ptr
        }
        uint64 numTransactionsInBatch;
        BlockContext memory _block;
        // 2. append block information to public inputs.
        for (uint256 i = 0; i < batch.blocks.length; i++) {
            // validate blocks, we won't check first block against previous batch.
            {
                BlockContext memory _currentBlock = batch.blocks[i];
                if (i > 0) {
                    require(_block.blockHash == _currentBlock.parentHash, "Parent hash mismatch");
                    require(_block.blockNumber + 1 == _currentBlock.blockNumber, "Block number mismatch");
                }
                _block = _currentBlock;
            }

        // concatenate tx hashes
        uint256 l2TxPtr = ChunkCodec.l2TxPtr(chunkPtr, _numBlocks);
        while (_numBlocks > 0) {
            // concatenate l2 transaction hashes
            uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
            for (uint256 j = 0; j < _numTransactionsInBlock; j++) {
                bytes32 txHash;
                (txHash, l2TxPtr) = ChunkCodec.loadL2TxHash(l2TxPtr);
            // append blockHash and parentHash to public inputs
            {
                bytes32 blockHash = _block.blockHash;
                bytes32 parentHash = _block.parentHash;
                assembly {
                    mstore(dataPtr, txHash)
                    dataPtr := add(dataPtr, 0x20)
                    mstore(publicInputsPtr, blockHash)
                    publicInputsPtr := add(publicInputsPtr, 0x20)
                    mstore(publicInputsPtr, parentHash)
                    publicInputsPtr := add(publicInputsPtr, 0x20)
                }
            }
            // append blockNumber and blockTimestamp to public inputs
            {
                uint256 blockNumber = _block.blockNumber;
                uint256 blockTimestamp = _block.timestamp;
                assembly {
                    mstore(publicInputsPtr, shl(192, blockNumber))
                    publicInputsPtr := add(publicInputsPtr, 0x8)
                    mstore(publicInputsPtr, shl(192, blockTimestamp))
                    publicInputsPtr := add(publicInputsPtr, 0x8)
                }
            }
            // append baseFee to public inputs
            {
                uint256 baseFee = _block.baseFee;
                assembly {
                    mstore(publicInputsPtr, baseFee)
                    publicInputsPtr := add(publicInputsPtr, 0x20)
                }
            }
            uint64 numTransactionsInBlock = _block.numTransactions;
            // gasLimit, numTransactions and numL1Messages to public inputs
            {
                uint256 gasLimit = _block.gasLimit;
                uint256 numL1MessagesInBlock = _block.numL1Messages;
                assembly {
                    mstore(publicInputsPtr, shl(192, gasLimit))
                    publicInputsPtr := add(publicInputsPtr, 0x8)
                    mstore(publicInputsPtr, shl(240, numTransactionsInBlock))
                    publicInputsPtr := add(publicInputsPtr, 0x2)
                    mstore(publicInputsPtr, shl(240, numL1MessagesInBlock))
                    publicInputsPtr := add(publicInputsPtr, 0x2)
                }
            }
            numTransactionsInBatch += numTransactionsInBlock;
        }
        require(numTransactionsInBatch <= maxNumTxInBatch, "Too many transactions in batch");

            unchecked {
                _numBlocks -= 1;
                blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH;
        // 3. append transaction hash to public inputs.
        uint256 _l2TxnPtr;
        {
            bytes memory l2Transactions = batch.l2Transactions;
            assembly {
                _l2TxnPtr := add(l2Transactions, 0x20)
            }
        }
        for (uint256 i = 0; i < batch.blocks.length; i++) {
            uint256 numL1MessagesInBlock = batch.blocks[i].numL1Messages;
            require(numL1MessagesInBlock == 0);
            uint256 numTransactionsInBlock = batch.blocks[i].numTransactions;
            for (uint256 j = numL1MessagesInBlock; j < numTransactionsInBlock; ++j) {
                bytes32 hash;
                assembly {
                    let txPayloadLength := shr(224, mload(_l2TxnPtr))
                    _l2TxnPtr := add(_l2TxnPtr, 4)
                    _l2TxnPtr := add(_l2TxnPtr, txPayloadLength)
                    hash := keccak256(sub(_l2TxnPtr, txPayloadLength), txPayloadLength)
                    mstore(publicInputsPtr, hash)
                    publicInputsPtr := add(publicInputsPtr, 0x20)
                }
            }
        }

        // check chunk has correct length
        require(l2TxPtr - chunkPtr == _chunk.length, "incomplete l2 transaction data");

        // compute data hash and store to memory
        assembly {
            let dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
            mstore(memPtr, dataHash)
        // 4. append padding transaction to public inputs.
        bytes32 txHashPadding = paddingTxHash;
        for (uint256 i = numTransactionsInBatch; i < maxNumTxInBatch; i++) {
            assembly {
                mstore(publicInputsPtr, txHashPadding)
                publicInputsPtr := add(publicInputsPtr, 0x20)
            }
        }

        // 5. compute public input hash
        bytes32 publicInputHash;
        {
            uint256 publicInputsSize = 32 * 3 + batch.blocks.length * 124 + 32 * maxNumTxInBatch;
            assembly {
                publicInputHash := keccak256(sub(publicInputsPtr, publicInputsSize), publicInputsSize)
            }
        }

        return publicInputHash;
    }
}
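The mock contract above sizes its public input buffer as 32 * 3 + 124 * blocks + 32 * maxNumTxInBatch. A one-line Go mirror of that arithmetic (the function name is mine, not from the repository):

    // publicInputsSize mirrors the byte layout hashed by _computePublicInputHash:
    // three 32-byte roots; a 124-byte context per block (32+32 block/parent hash,
    // 8+8 number/timestamp, 32 baseFee, 8 gasLimit, 2+2 tx counts);
    // and one 32-byte (possibly padded) tx-hash slot per allowed transaction.
    func publicInputsSize(numBlocks, maxNumTxInBatch int) int {
        return 32*3 + numBlocks*124 + 32*maxNumTxInBatch
    }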
@@ -11,6 +11,24 @@ contract MockBridgeL2 {
    /// @param messageHash The hash of the corresponding message.
    event AppendMessage(uint256 index, bytes32 messageHash);

    /********************************
     * Events from L1BlockContainer *
     ********************************/

    /// @notice Emitted when a block is imported.
    /// @param blockHash The hash of the imported block.
    /// @param blockHeight The height of the imported block.
    /// @param blockTimestamp The timestamp of the imported block.
    /// @param baseFee The base fee of the imported block.
    /// @param stateRoot The state root of the imported block.
    event ImportBlock(
        bytes32 indexed blockHash,
        uint256 blockHeight,
        uint256 blockTimestamp,
        uint256 baseFee,
        bytes32 stateRoot
    );

    /*********************************
     * Events from L2ScrollMessenger *
     *********************************/
@@ -11,12 +11,12 @@ import (
    "github.com/stretchr/testify/assert"
    "gorm.io/gorm"

    "scroll-tech/common/database"
    "scroll-tech/common/docker"

    "scroll-tech/database/migrate"

    bcmd "scroll-tech/bridge/cmd"
    "scroll-tech/bridge/internal/config"
    "scroll-tech/bridge/internal/orm/migrate"
    "scroll-tech/bridge/internal/utils"
    "scroll-tech/bridge/mock_bridge"
)

@@ -46,13 +46,13 @@ var (
)

func setupDB(t *testing.T) *gorm.DB {
    cfg := &database.Config{
    cfg := &config.DBConfig{
        DSN:        base.DBConfig.DSN,
        DriverName: base.DBConfig.DriverName,
        MaxOpenNum: base.DBConfig.MaxOpenNum,
        MaxIdleNum: base.DBConfig.MaxIdleNum,
    }
    db, err := database.InitDB(cfg)
    db, err := utils.InitDB(cfg)
    assert.NoError(t, err)
    sqlDB, err := db.DB()
    assert.NoError(t, err)

@@ -63,9 +63,9 @@ func setupDB(t *testing.T) *gorm.DB {
func TestMain(m *testing.M) {
    base = docker.NewDockerApp()
    bridgeApp = bcmd.NewBridgeApp(base, "../conf/config.json")
    defer bridgeApp.Free()
    defer base.Free()
    m.Run()
    bridgeApp.Free()
    base.Free()
}

func setupEnv(t *testing.T) {

@@ -129,10 +129,6 @@ func prepareContracts(t *testing.T) {
func TestFunction(t *testing.T) {
    setupEnv(t)

    // process start test
    t.Run("TestProcessStart", testProcessStart)
    t.Run("TestProcessStartEnableMetrics", testProcessStartEnableMetrics)

    // l1 rollup and watch rollup events
    t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)

@@ -140,7 +136,7 @@ func TestFunction(t *testing.T) {
    t.Run("TestRelayL1MessageSucceed", testRelayL1MessageSucceed)

    // l2 message
    // TODO: add a "user relay l2msg Succeed" test
    t.Run("TestRelayL2MessageSucceed", testRelayL2MessageSucceed)

    // l1/l2 gas oracle
    t.Run("TestImportL1GasPrice", testImportL1GasPrice)
@@ -8,18 +8,20 @@ import (
    "github.com/scroll-tech/go-ethereum/common"
    gethTypes "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"
    "gorm.io/gorm"

    "scroll-tech/common/database"
    "scroll-tech/common/types"

    "scroll-tech/bridge/internal/controller/relayer"
    "scroll-tech/bridge/internal/controller/watcher"
    "scroll-tech/bridge/internal/orm"
    bridgeTypes "scroll-tech/bridge/internal/types"
    "scroll-tech/bridge/internal/utils"
)

func testImportL1GasPrice(t *testing.T) {
    db := setupDB(t)
    defer database.CloseDB(db)
    defer utils.CloseDB(db)

    prepareContracts(t)

@@ -43,10 +45,10 @@ func testImportL1GasPrice(t *testing.T) {

    l1BlockOrm := orm.NewL1Block(db)
    // check db status
    latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
    latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight()
    assert.NoError(t, err)
    assert.Equal(t, number, latestBlockHeight)
    blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
    blocks, err := l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
    assert.NoError(t, err)
    assert.Equal(t, len(blocks), 1)
    assert.Empty(t, blocks[0].OracleTxHash)

@@ -54,7 +56,7 @@ func testImportL1GasPrice(t *testing.T) {

    // relay gas price
    l1Relayer.ProcessGasPriceOracle()
    blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
    blocks, err = l1BlockOrm.GetL1Blocks(map[string]interface{}{"number": latestBlockHeight})
    assert.NoError(t, err)
    assert.Equal(t, len(blocks), 1)
    assert.NotEmpty(t, blocks[0].OracleTxHash)

@@ -63,44 +65,54 @@ func testImportL1GasPrice(t *testing.T) {

func testImportL2GasPrice(t *testing.T) {
    db := setupDB(t)
    defer database.CloseDB(db)
    defer utils.CloseDB(db)
    prepareContracts(t)

    l2Cfg := bridgeApp.Config.L2Config
    l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
    l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig)
    assert.NoError(t, err)

    // add fake chunk
    chunk := &types.Chunk{
        Blocks: []*types.WrappedBlock{
            {
                Header: &gethTypes.Header{
                    Number:     big.NewInt(1),
                    ParentHash: common.Hash{},
                    Difficulty: big.NewInt(0),
                    BaseFee:    big.NewInt(0),
                },
                Transactions:     nil,
                WithdrawTrieRoot: common.Hash{},
    // add fake blocks
    traces := []*bridgeTypes.WrappedBlock{
        {
            Header: &gethTypes.Header{
                Number:     big.NewInt(1),
                ParentHash: common.Hash{},
                Difficulty: big.NewInt(0),
                BaseFee:    big.NewInt(0),
            },
            Transactions:     nil,
            WithdrawTrieRoot: common.Hash{},
        },
    }
    chunkHash, err := chunk.Hash(0)
    assert.NoError(t, err)

    batchOrm := orm.NewBatch(db)
    _, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*types.Chunk{chunk})
    blockTraceOrm := orm.NewBlockTrace(db)
    assert.NoError(t, blockTraceOrm.InsertWrappedBlocks(traces))

    parentBatch := &bridgeTypes.BatchInfo{
        Index: 0,
        Hash:  "0x0000000000000000000000000000000000000000",
    }
    batchData := bridgeTypes.NewBatchData(parentBatch, []*bridgeTypes.WrappedBlock{traces[0]}, l2Cfg.BatchProposerConfig.PublicInputConfig)
    blockBatchOrm := orm.NewBlockBatch(db)
    err = db.Transaction(func(tx *gorm.DB) error {
        _, dbTxErr := blockBatchOrm.InsertBlockBatchByBatchData(tx, batchData)
        if dbTxErr != nil {
            return dbTxErr
        }
        return nil
    })
    assert.NoError(t, err)

    // check db status
    batch, err := batchOrm.GetLatestBatch(context.Background())
    batch, err := blockBatchOrm.GetLatestBatch()
    assert.NoError(t, err)
    assert.Empty(t, batch.OracleTxHash)
    assert.Equal(t, types.GasOracleStatus(batch.OracleStatus), types.GasOraclePending)

    // relay gas price
    l2Relayer.ProcessGasPriceOracle()
    batch, err = batchOrm.GetLatestBatch(context.Background())
    batch, err = blockBatchOrm.GetLatestBatch()
    assert.NoError(t, err)
    assert.NotEmpty(t, batch.OracleTxHash)
    assert.Equal(t, types.GasOracleStatus(batch.OracleStatus), types.GasOracleImporting)