Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)

Compare commits (81 commits)
Commit SHA1s in this comparison:

221a06ecf2, a16bba373a, 3a7c49c550, aaea3cc1f2, b96e8778a5, ab82b79638, 0a8164ee5b, 548863854b, ac20a0045c, fdb71dd6aa,
693974ded4, 08dac095b6, 8910699e31, 4909dc8cd2, b0609fa15c, 0cb29c3942, 03089eaeee, ef673c981f, 7157382b6d, 1264f82290,
b8c463ffbe, db07180e7a, 8cb750e5eb, 7dd1b05f46, bf2692b7cb, 566fb23b9d, 2e627f781a, bc5ec89b70, 99454e5b88, 144c7ed024,
4aa5d5cd37, 486c7ee0f9, 0243c86b3c, e6db4ac3a8, 50040a164e, 8494ab1899, 2102e16fdb, c2ab4bf16d, 1059f9d3f8, 9e8c3432c3,
ff380141a8, 2216ad4271, 25d5fabac9, d494f4419c, e843419397, badde3cba5, caa16e1676, 7c742da488, 0cdb0dc7a9, 3143373f5f,
18ee6a67c5, 05da46a719, 55e0b11d17, 6f72d0447e, 76d66eba58, c551609e17, 47e5a43646, 7604612581, 35d4ec5ad0, 8f745e9836,
4ec1045916, f94e21dd45, 205641a65c, d02f41b2c9, 72204358f0, 072bc21d20, f7a2465db8, 154ff0c8a0, 2b266aaa68, 410f14bc7d,
4d903bc9b2, 59a2f1e998, 20c5e9855b, 1f2fe74cbe, 0e12661fd5, 04e66231e5, 417a228523, dcd85b2f56, afb6476823, d991d6b99d,
f0920362c5
.github/workflows/bridge_history_api.yml (vendored, 6 changes)

@@ -32,7 +32,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Lint
@@ -46,7 +46,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Test
@@ -65,7 +65,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
.github/workflows/common.yml (vendored, 6 changes)

@@ -35,7 +35,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Cache cargo
@@ -54,7 +54,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
@@ -79,7 +79,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
.github/workflows/contracts.yml (vendored, 16 changes)

@@ -82,17 +82,15 @@ jobs:
         run : forge coverage --report lcov

       - name : Prune coverage
-        run : lcov --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*'
+        run : lcov --rc branch_coverage=1 --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*'

-      - name: Report code coverage
-        uses: zgosalvez/github-actions-report-lcov@v3
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
         with:
-          coverage-files: contracts/lcov.info.pruned
-          minimum-coverage: 0
-          artifact-name: code-coverage-report
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          working-directory: contracts
-          update-comment: true
+          files: contracts/lcov.info.pruned
+          flags: contracts

   hardhat:
     if: github.event.pull_request.draft == false
.github/workflows/coordinator.yml (vendored, 6 changes)

@@ -39,7 +39,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Lint
@@ -54,7 +54,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
@@ -95,7 +95,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
.github/workflows/database.yml (vendored, 6 changes)

@@ -32,7 +32,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Lint
@@ -47,7 +47,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
@@ -72,7 +72,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
.github/workflows/docker.yml (vendored, 197 changes)

@@ -9,67 +9,73 @@ jobs:
   event_watcher:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push event_watcher docker
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./build/dockerfiles/event_watcher.Dockerfile
-          push: true
-          tags: scrolltech/event-watcher:${{github.ref_name}}
-          # cache-from: type=gha,scope=${{ github.workflow }}
-          # cache-to: type=gha,scope=${{ github.workflow }}
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push event_watcher docker
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./build/dockerfiles/event_watcher.Dockerfile
+          push: true
+          tags: |
+            scrolltech/event-watcher:${{github.ref_name}}
+            scrolltech/event-watcher:latest
+          # cache-from: type=gha,scope=${{ github.workflow }}
+          # cache-to: type=gha,scope=${{ github.workflow }}
   gas_oracle:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push gas_oracle docker
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./build/dockerfiles/gas_oracle.Dockerfile
-          push: true
-          tags: scrolltech/gas-oracle:${{github.ref_name}}
-          # cache-from: type=gha,scope=${{ github.workflow }}
-          # cache-to: type=gha,scope=${{ github.workflow }}
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push gas_oracle docker
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./build/dockerfiles/gas_oracle.Dockerfile
+          push: true
+          tags: |
+            scrolltech/gas-oracle:${{github.ref_name}}
+            scrolltech/gas-oracle:latest
+          # cache-from: type=gha,scope=${{ github.workflow }}
+          # cache-to: type=gha,scope=${{ github.workflow }}
   rollup_relayer:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push rollup_relayer docker
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./build/dockerfiles/rollup_relayer.Dockerfile
-          push: true
-          tags: scrolltech/rollup-relayer:${{github.ref_name}}
-          # cache-from: type=gha,scope=${{ github.workflow }}
-          # cache-to: type=gha,scope=${{ github.workflow }}
-  bridgehistoryapi-cross-msg-fetcher:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push rollup_relayer docker
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./build/dockerfiles/rollup_relayer.Dockerfile
+          push: true
+          tags: |
+            scrolltech/rollup-relayer:${{github.ref_name}}
+            scrolltech/rollup-relayer:latest
+          # cache-from: type=gha,scope=${{ github.workflow }}
+          # cache-to: type=gha,scope=${{ github.workflow }}
+  bridgehistoryapi-fetcher:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -81,16 +87,18 @@ jobs:
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push bridgehistoryapi-cross-msg-fetcher docker
+      - name: Build and push bridgehistoryapi-fetcher docker
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./build/dockerfiles/bridgehistoryapi-cross-msg-fetcher.Dockerfile
+          file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
           push: true
-          tags: scrolltech/bridgehistoryapi-cross-msg-fetcher:${{github.ref_name}}
+          tags: |
+            scrolltech/bridgehistoryapi-fetcher:${{github.ref_name}}
+            scrolltech/bridgehistoryapi-fetcher:latest
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
-  bridgehistoryapi-server:
+  bridgehistoryapi-api:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -102,33 +110,60 @@ jobs:
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push bridgehistoryapi-server docker
+      - name: Build and push bridgehistoryapi-api docker
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./build/dockerfiles/bridgehistoryapi-server.Dockerfile
+          file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
           push: true
-          tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
+          tags: |
+            scrolltech/bridgehistoryapi-api:${{github.ref_name}}
+            scrolltech/bridgehistoryapi-api:latest
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
-  coordinator:
+  coordinator-api:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push coordinator docker
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./build/dockerfiles/coordinator.Dockerfile
-          push: true
-          tags: scrolltech/coordinator:${{github.ref_name}}
-          # cache-from: type=gha,scope=${{ github.workflow }}
-          # cache-to: type=gha,scope=${{ github.workflow }}
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push coordinator docker
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./build/dockerfiles/coordinator-api.Dockerfile
+          push: true
+          tags: |
+            scrolltech/coordinator-api:${{github.ref_name}}
+            scrolltech/coordinator-api:latest
+          # cache-from: type=gha,scope=${{ github.workflow }}
+          # cache-to: type=gha,scope=${{ github.workflow }}
+  coordinator-cron:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push coordinator docker
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./build/dockerfiles/coordinator-cron.Dockerfile
+          push: true
+          tags: |
+            scrolltech/coordinator-cron:${{github.ref_name}}
+            scrolltech/coordinator-cron:latest
+          # cache-from: type=gha,scope=${{ github.workflow }}
+          # cache-to: type=gha,scope=${{ github.workflow }}
.github/workflows/integration.yml (vendored, 2 changes)

@@ -22,7 +22,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
.github/workflows/intermediate-docker.yml (vendored, 2 changes)

@@ -7,7 +7,7 @@ on:
         description: 'Go version'
         required: true
         type: string
-        default: '1.19'
+        default: '1.20'
       RUST_VERSION:
         description: 'Rust toolchain version'
         required: true
.github/workflows/prover.yml (vendored, 8 changes)

@@ -32,7 +32,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Test
@@ -56,7 +56,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
      - name: Cache cargo
@@ -73,7 +73,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Lint
@@ -87,7 +87,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
.github/workflows/rollup.yml (vendored, 6 changes)

@@ -34,7 +34,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
@@ -58,7 +58,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
@@ -83,7 +83,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.x
+          go-version: 1.20.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
.gitmodules (vendored, 3 changes)

@@ -1,9 +1,6 @@
 [submodule "l2geth"]
 	path = l2geth
 	url = git@github.com:scroll-tech/go-ethereum.git
-[submodule "rpc-gateway"]
-	path = rpc-gateway
-	url = git@github.com:scroll-tech/rpc-gateway.git
 [submodule "contracts/lib/ds-test"]
 	path = contracts/lib/ds-test
 	url = https://github.com/dapphub/ds-test
@@ -33,7 +33,7 @@ Examples of unacceptable behavior include:
 * Public or private harassment
 * Publishing others' private information, such as a physical or email
   address, without their explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
+* Other conduct that could reasonably be considered inappropriate in a
   professional setting

 ## Enforcement Responsibilities
@@ -22,10 +22,11 @@ We'd also love PRs. If you're thinking of a large PR, we advise opening up an is
 ## Submitting a pull request

 1. [Fork][fork] and clone the repository.
-1. Create a new branch: `git checkout -b my-branch-name`.
-1. Make your change, add tests, and make sure the tests still pass.
-1. Push to your fork and [submit a pull request][pr].
-1. Pat yourself on the back and wait for your pull request to be reviewed and merged.
+2. Create a new branch: `git checkout -b my-branch-name`.
+3. Make your change, add tests, and make sure the tests still pass.
+4. Format your code in scroll home directory: `make lint && make fmt`
+5. Push to your fork and [submit a pull request][pr].
+6. Pat yourself on the back and wait for your pull request to be reviewed and merged.

 Here are a few things you can do that will increase the likelihood of your pull request being accepted:
LICENSE (2 changes)

@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2022 Scroll
+Copyright (c) 2022-2023 Scroll

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
Makefile (34 changes)

@@ -1,11 +1,20 @@
-.PHONY: check update dev_docker build_test_docker run_test_docker clean
+.PHONY: fmt dev_docker build_test_docker run_test_docker clean update

-L2GETH_TAG=scroll-v4.3.55
+L2GETH_TAG=scroll-v5.1.6

 help: ## Display this help message
 	@grep -h \
 		-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
 		awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

+update:
+	go work sync
+	cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
+	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
+	cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy

 lint: ## The code's format and security checks.
 	make -C rollup lint
@@ -15,23 +24,22 @@ lint: ## The code's format and security checks.
 	make -C prover lint
 	make -C bridge-history-api lint

-update: ## update dependencies
+fmt: ## format the code
 	go work sync
-	cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
-	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	goimports -local $(PWD)/rollup/ -w .
+	cd $(PWD)/bridge-history-api/ && go mod tidy
+	cd $(PWD)/common/ && go mod tidy
+	cd $(PWD)/coordinator/ && go mod tidy
+	cd $(PWD)/database/ && go mod tidy
+	cd $(PWD)/prover/ && go mod tidy
+	cd $(PWD)/rollup/ && go mod tidy
+	cd $(PWD)/tests/integration-test/ && go mod tidy
+
+	goimports -local $(PWD)/bridge-history-api/ -w .
 	goimports -local $(PWD)/common/ -w .
 	goimports -local $(PWD)/coordinator/ -w .
 	goimports -local $(PWD)/database/ -w .
 	goimports -local $(PWD)/prover/ -w .
-	goimports -local $(PWD)/prover-stats-api/ -w .
+	goimports -local $(PWD)/rollup/ -w .
 	goimports -local $(PWD)/tests/integration-test/ -w .

 dev_docker: ## build docker images for development/testing usages
@@ -13,7 +13,7 @@
 ## Directory Structure

 <pre>
-├── <a href="./bridge-history-api/">bridge-history-api</a>: Bridge history service that collects deposit and withdraw events from both L1 and L2 chain and generates withdrawal proofs
+├── <a href="./bridge-history-api/">bridge-history-api</a>: Bridge history service that collects deposit and withdraw events from both L1 and L2 chains and generates withdrawal proofs
 ├── <a href="./common/">common</a>: Common libraries and types
 ├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
 ├── <a href="./database">database</a>: Database client and schema definition
@@ -29,7 +29,7 @@
 We welcome community contributions to this repository. Before you submit any issues or PRs, please read the [Code of Conduct](CODE_OF_CONDUCT.md) and the [Contribution Guideline](CONTRIBUTING.md).

 ## Prerequisites
-+ Go 1.19
++ Go 1.20
 + Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
 + Hardhat / Foundry
 + Docker
@@ -1,6 +1,7 @@
 .PHONY: lint
 REPO_ROOT_DIR=./..
+IMAGE_VERSION=latest
 PWD=$(shell pwd)

 lint: ## Lint the files - used for CI
 	GOBIN=$(PWD)/build/bin go run ../build/lint.go
@@ -11,16 +12,31 @@ test:
 bridgehistoryapi-db-cli:
 	go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli

-bridgehistoryapi-cross-msg-fetcher:
-	go build -o $(PWD)/build/bin/bridgehistoryapi-cross-msg-fetcher ./cmd/cross_msg_fetcher
+bridgehistoryapi-fetcher:
+	go build -o $(PWD)/build/bin/bridgehistoryapi-fetcher ./cmd/fetcher

-bridgehistoryapi-server:
-	go build -o $(PWD)/build/bin/bridgehistoryapi-server ./cmd/backend_server
+bridgehistoryapi-api:
+	go build -o $(PWD)/build/bin/bridgehistoryapi-api ./cmd/api

-db-docker:
+reset-env:
+	if docker ps -a -q -f name=bridgehistoryapi-redis | grep -q . ; then \
+		docker stop bridgehistoryapi-redis; \
+		docker rm bridgehistoryapi-redis; \
+	fi
+	docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
+	if docker ps -a -q -f name=bridgehistoryapi-history-db | grep -q . ; then \
+		docker stop bridgehistoryapi-history-db; \
+		docker rm bridgehistoryapi-history-db; \
+	fi
 	docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
+	until docker exec bridgehistoryapi-history-db pg_isready -h localhost -p 5432 -U postgres > /dev/null; do \
+		echo "Waiting for postgres to be ready..."; \
+		sleep 1; \
+	done
+	echo "Postgres is ready."
 	go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset

 bridgehistoryapi-docker:
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-cross-msg-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-cross-msg-fetcher.Dockerfile
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-server:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-server.Dockerfile
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
@@ -7,39 +7,36 @@ The bridge-history-api contains three distinct components

 ### bridgehistoryapi-db-cli

-Provide init, show version, rollback, check status services of DB
+Provide init, show version, rollback, and check status services of DB
 ```
 cd ./bridge-history-api
 make bridgehistoryapi-db-cli
 ./build/bin/bridgehistoryapi-db-cli [command]
 ```

-### bridgehistoryapi-cross-msg-fetcher
+### bridgehistoryapi-fetcher

-Fetch the transactions from both l1 and l2
+Fetch the transactions from both L1 and L2
 ```
 cd ./bridge-history-api
-make bridgehistoryapi-cross-msg-fetcher
-./build/bin/bridgehistoryapi-cross-msg-fetcher
+make bridgehistoryapi-fetcher
+./build/bin/bridgehistoryapi-fetcher
 ```

-### bridgehistoryapi-server
+### bridgehistoryapi-api

 provides REST APIs. Please refer to the API details below.
 ```
 cd ./bridge-history-api
-make bridgehistoryapi-server
-./build/bin/bridgehistoryapi-server
+make bridgehistoryapi-api
+./build/bin/bridgehistoryapi-api
 ```

-## APIs provided by bridgehistoryapi-server
+## APIs provided by bridgehistoryapi-api

 assume `bridgehistoryapi-server` listening on `https://localhost:8080`
 can change this port thru modify `config.json`

-1. `/txs`
+1. `/api/txs`
 ```
-// @Summary get all txs under given address
+// @Summary get all txs under the given address
 // @Accept plain
 // @Produce plain
 // @Param address query string true "wallet address"
@@ -49,34 +46,36 @@ can change this port thru modify `config.json`
 // @Router /api/txs [get]
 ```

-2. `/txsbyhashes`
-```
-// @Summary get txs by given tx hashes
-// @Accept plain
-// @Produce plain
-// @Param hashes query string array true "array of hashes list"
-// @Success 200
-// @Router /api/txsbyhashes [post]
-```
-
-3. `/claimable`
-```
-// @Summary get all claimable txs under given address
-// @Accept plain
-// @Produce plain
-// @Param address query string true "wallet address"
-// @Param page_size query int true "page size"
-// @Param page query int true "page"
-// @Success 200
-// @Router /api/claimable [get]
-```
-
-4. `/withdraw_root`
-```
-// @Summary get withdraw_root of given batch index
-// @Accept plain
-// @Produce plain
-// @Param batch_index query string true "batch_index"
-// @Success 200
-// @Router /api/withdraw_root [get]
-```
+2. `/api/l2/withdrawals`
+```
+// @Summary get all L2 withdrawals under given address
+// @Accept plain
+// @Produce plain
+// @Param address query string true "wallet address"
+// @Param page_size query int true "page size"
+// @Param page query int true "page"
+// @Success 200
+// @Router /api/l2/withdrawals [get]
+```
+
+3. `/api/l2/unclaimed/withdrawals`
+```
+// @Summary get all L2 unclaimed withdrawals under the given address
+// @Accept plain
+// @Produce plain
+// @Param address query string true "wallet address"
+// @Param page_size query int true "page size"
+// @Param page query int true "page"
+// @Success 200
+// @Router /api/l2/unclaimed/withdrawals [get]
+```
+
+4. `/api/txsbyhashes`
+```
+// @Summary get txs by given tx hashes
+// @Accept plain
+// @Produce plain
+// @Param hashes query string array true "array of hashes"
+// @Success 200
+// @Router /api/txsbyhashes [post]
+```
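For orientation, the endpoints documented above are plain HTTP GETs (or a POST for `/api/txsbyhashes`) with query parameters. The sketch below is only illustrative, not part of the repository: it assumes the service is running locally on port 8080 as in the README, and uses a placeholder wallet address.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Query parameters mirror the swagger annotations above: address, page, page_size.
	params := url.Values{}
	params.Set("address", "0x0000000000000000000000000000000000000000") // placeholder wallet address
	params.Set("page", "1")
	params.Set("page_size", "10")

	// /api/txs returns all txs under the given address.
	resp, err := http.Get("http://localhost:8080/api/txs?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```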
File diff suppressed because one or more lines are too long
bridge-history-api/cmd/api/app/app.go (new file, 105 lines)

@@ -0,0 +1,105 @@
package app

import (
	"crypto/tls"
	"fmt"
	"os"
	"os/signal"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/go-redis/redis/v8"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/common/database"
	"scroll-tech/common/observability"
	"scroll-tech/common/utils"

	"scroll-tech/bridge-history-api/internal/config"
	"scroll-tech/bridge-history-api/internal/controller/api"
	"scroll-tech/bridge-history-api/internal/route"
)

var app *cli.App

func init() {
	app = cli.NewApp()

	app.Action = action
	app.Name = "Scroll Bridge History API Web Service"
	app.Usage = "The Scroll Bridge History API Web Service"
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Commands = []*cli.Command{}

	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	db, err := database.InitDB(cfg.DB)
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := database.CloseDB(db); deferErr != nil {
			log.Error("failed to close db", "err", err)
		}
	}()
	opts := &redis.Options{
		Addr:         cfg.Redis.Address,
		Username:     cfg.Redis.Username,
		Password:     cfg.Redis.Password,
		MinIdleConns: cfg.Redis.MinIdleConns,
		ReadTimeout:  time.Duration(cfg.Redis.ReadTimeoutMs * int(time.Millisecond)),
	}
	// Production Redis service has enabled transit_encryption.
	if !cfg.Redis.Local {
		opts.TLSConfig = &tls.Config{
			MinVersion:         tls.VersionTLS12,
			InsecureSkipVerify: true, //nolint:gosec
		}
	}
	log.Info("init redis client", "addr", opts.Addr, "user name", opts.Username, "is local", cfg.Redis.Local,
		"min idle connections", opts.MinIdleConns, "read timeout", opts.ReadTimeout)
	redisClient := redis.NewClient(opts)
	api.InitController(db, redisClient)

	router := gin.Default()
	registry := prometheus.DefaultRegisterer
	route.Route(router, cfg, registry)

	go func() {
		port := ctx.Int(utils.ServicePortFlag.Name)
		if runServerErr := router.Run(fmt.Sprintf(":%d", port)); runServerErr != nil {
			log.Crit("run http server failure", "error", runServerErr)
		}
	}()

	observability.Server(ctx, db)

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run event watcher cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
bridge-history-api/cmd/api/main.go (new file, 7 lines)

@@ -0,0 +1,7 @@
package main

import "scroll-tech/bridge-history-api/cmd/api/app"

func main() {
	app.Run()
}
@@ -1,80 +0,0 @@
package app

import (
	"fmt"
	"os"
	"os/signal"

	"github.com/ethereum/go-ethereum/log"
	"github.com/gin-gonic/gin"
	"github.com/urfave/cli/v2"

	"bridge-history-api/config"
	"bridge-history-api/internal/controller"
	"bridge-history-api/internal/route"
	"bridge-history-api/utils"
)

var (
	app *cli.App
)

func init() {
	app = cli.NewApp()

	app.Action = action
	app.Name = "Scroll Bridge History Web Service"
	app.Usage = "The Scroll Bridge History Web Service"
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Commands = []*cli.Command{}

	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	db, err := utils.InitDB(cfg.DB)
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := utils.CloseDB(db); deferErr != nil {
			log.Error("failed to close db", "err", err)
		}
	}()
	// init Prover Stats API
	port := cfg.Server.HostPort

	router := gin.Default()
	controller.InitController(db)
	route.Route(router, cfg)

	go func() {
		if runServerErr := router.Run(fmt.Sprintf(":%s", port)); runServerErr != nil {
			log.Crit("run http server failure", "error", runServerErr)
		}
	}()
	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run event watcher cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
@@ -1,7 +0,0 @@
package main

import "bridge-history-api/cmd/backend_server/app"

func main() {
	app.Run()
}
@@ -1,144 +0,0 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"bridge-history-api/config"
	"bridge-history-api/crossmsg"
	"bridge-history-api/crossmsg/messageproof"
	"bridge-history-api/orm"
	"bridge-history-api/utils"
)

var (
	app *cli.App
)

func init() {
	app = cli.NewApp()

	app.Action = action
	app.Name = "Scroll Bridge History API"
	app.Usage = "The Scroll Bridge Web Backend"
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Commands = []*cli.Command{}

	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	subCtx, cancel := context.WithCancel(ctx.Context)
	defer cancel()

	l1client, err := ethclient.Dial(cfg.L1.Endpoint)
	if err != nil {
		log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
	}
	l2client, err := ethclient.Dial(cfg.L2.Endpoint)
	if err != nil {
		log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
	}

	db, err := utils.InitDB(cfg.DB)
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := utils.CloseDB(db); deferErr != nil {
			log.Error("failed to close db", "err", err)
		}
	}()
	if err != nil {
		log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
	}

	l1worker := &crossmsg.FetchEventWorker{F: crossmsg.L1FetchAndSaveEvents, G: crossmsg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}

	l2worker := &crossmsg.FetchEventWorker{F: crossmsg.L2FetchAndSaveEvents, G: crossmsg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}

	l1AddressList := []common.Address{
		common.HexToAddress(cfg.L1.CustomERC20GatewayAddr),
		common.HexToAddress(cfg.L1.ERC721GatewayAddr),
		common.HexToAddress(cfg.L1.ERC1155GatewayAddr),
		common.HexToAddress(cfg.L1.MessengerAddr),
		common.HexToAddress(cfg.L1.ETHGatewayAddr),
		common.HexToAddress(cfg.L1.StandardERC20Gateway),
		common.HexToAddress(cfg.L1.WETHGatewayAddr),
	}

	l2AddressList := []common.Address{
		common.HexToAddress(cfg.L2.CustomERC20GatewayAddr),
		common.HexToAddress(cfg.L2.ERC721GatewayAddr),
		common.HexToAddress(cfg.L2.ERC1155GatewayAddr),
		common.HexToAddress(cfg.L2.MessengerAddr),
		common.HexToAddress(cfg.L2.ETHGatewayAddr),
		common.HexToAddress(cfg.L2.StandardERC20Gateway),
		common.HexToAddress(cfg.L2.WETHGatewayAddr),
	}

	l1crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, crossmsg.L1ReorgHandling)
	if err != nil {
		log.Crit("failed to create l1 cross message fetcher", "error", err)
	}

	go l1crossMsgFetcher.Start()
	defer l1crossMsgFetcher.Stop()

	l2crossMsgFetcher, err := crossmsg.NewMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, crossmsg.L2ReorgHandling)
	if err != nil {
		log.Crit("failed to create l2 cross message fetcher", "error", err)
	}

	go l2crossMsgFetcher.Start()
	defer l2crossMsgFetcher.Stop()

	CrossMsgOrm := orm.NewCrossMsg(db)

	// BlockTimestamp fetcher for l1 and l2
	l1BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, CrossMsgOrm.UpdateL1BlockTimestamp, CrossMsgOrm.GetL1EarliestNoBlockTimestampHeight)
	go l1BlockTimeFetcher.Start()
	defer l1BlockTimeFetcher.Stop()

	l2BlockTimeFetcher := crossmsg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, CrossMsgOrm.UpdateL2BlockTimestamp, CrossMsgOrm.GetL2EarliestNoBlockTimestampHeight)
	go l2BlockTimeFetcher.Start()
	defer l2BlockTimeFetcher.Stop()

	// Proof updater and batch fetcher
	l2msgProofUpdater := messageproof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
	batchFetcher := crossmsg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
	go batchFetcher.Start()
	defer batchFetcher.Stop()

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run event watcher cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
@@ -1,7 +0,0 @@
package main

import "bridge-history-api/cmd/cross_msg_fetcher/app"

func main() {
	app.Run()
}
@@ -6,17 +6,13 @@ import (

 	"github.com/urfave/cli/v2"

-	"bridge-history-api/utils"
+	"scroll-tech/common/utils"
 )

-var (
-	// Set up database app info.
-	app *cli.App
-)
+var app *cli.App

 func init() {
 	app = cli.NewApp()
+	// Set up database app info.
 	app.Name = "db_cli"
 	app.Usage = "The Scroll Bridge-history-api DB CLI"
 	app.Flags = append(app.Flags, utils.CommonFlags...)
@@ -60,7 +56,8 @@
 				Name: "version",
 				Usage: "Rollback to the specified version.",
 				Value: 0,
 			}},
 		},
 	},
 	},
 }
 }
@@ -1,13 +1,15 @@
 package app

 import (
-	"github.com/ethereum/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/urfave/cli/v2"
 	"gorm.io/gorm"

-	"bridge-history-api/config"
-	"bridge-history-api/orm/migrate"
-	"bridge-history-api/utils"
+	"scroll-tech/common/database"
+	"scroll-tech/common/utils"
+
+	"scroll-tech/bridge-history-api/internal/config"
+	"scroll-tech/bridge-history-api/internal/orm/migrate"
 )

 func getConfig(ctx *cli.Context) (*config.Config, error) {
@@ -19,8 +21,8 @@ func getConfig(ctx *cli.Context) (*config.Config, error) {
 	return dbCfg, nil
 }

-func initDB(dbCfg *config.DBConfig) (*gorm.DB, error) {
-	return utils.InitDB(dbCfg)
+func initDB(dbCfg *database.Config) (*gorm.DB, error) {
+	return database.InitDB(dbCfg)
 }

 // resetDB clean or reset database.
@@ -1,6 +1,6 @@
 package main

-import "bridge-history-api/cmd/db_cli/app"
+import "scroll-tech/bridge-history-api/cmd/db_cli/app"

 func main() {
 	app.Run()
bridge-history-api/cmd/fetcher/app/app.go (new file, 93 lines)

@@ -0,0 +1,93 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/common/database"
	"scroll-tech/common/observability"
	"scroll-tech/common/utils"

	"scroll-tech/bridge-history-api/internal/config"
	"scroll-tech/bridge-history-api/internal/controller/fetcher"
)

var app *cli.App

func init() {
	app = cli.NewApp()

	app.Action = action
	app.Name = "Scroll Bridge History API Message Fetcher"
	app.Usage = "The Scroll Bridge History API Message Fetcher"
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Commands = []*cli.Command{}

	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}
}

func action(ctx *cli.Context) error {
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	subCtx, cancel := context.WithCancel(ctx.Context)
	defer cancel()

	l1Client, err := ethclient.Dial(cfg.L1.Endpoint)
	if err != nil {
		log.Crit("failed to connect to L1 geth", "endpoint", cfg.L1.Endpoint, "err", err)
	}

	l2Client, err := ethclient.Dial(cfg.L2.Endpoint)
	if err != nil {
		log.Crit("failed to connect to L2 geth", "endpoint", cfg.L2.Endpoint, "err", err)
	}

	db, err := database.InitDB(cfg.DB)
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := database.CloseDB(db); deferErr != nil {
			log.Error("failed to close db", "err", err)
		}
	}()
	if err != nil {
		log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
	}

	observability.Server(ctx, db)

	l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
	go l1MessageFetcher.Start()

	l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)
	go l2MessageFetcher.Start()

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run event watcher cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
bridge-history-api/cmd/fetcher/main.go
Normal file
7
bridge-history-api/cmd/fetcher/main.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package main
|
||||
|
||||
import "scroll-tech/bridge-history-api/cmd/fetcher/app"
|
||||
|
||||
func main() {
|
||||
app.Run()
|
||||
}
|
||||
bridge-history-api/conf/config.json (new file, 53 lines)

@@ -0,0 +1,53 @@
{
  "L1": {
    "confirmation": 0,
    "endpoint": "https://rpc.ankr.com/eth",
    "startHeight": 18306000,
    "blockTime": 10,
    "fetchLimit": 30,
    "MessengerAddr": "0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367",
    "ETHGatewayAddr": "0x7F2b8C31F88B6006c382775eea88297Ec1e3E905",
    "WETHGatewayAddr": "0x7AC440cAe8EB6328de4fA621163a792c1EA9D4fE",
    "StandardERC20GatewayAddr": "0xD8A791fE2bE73eb6E6cF1eb0cb3F36adC9B3F8f9",
    "CustomERC20GatewayAddr": "0xb2b10a289A229415a124EFDeF310C10cb004B6ff",
    "ERC721GatewayAddr": "0x6260aF48e8948617b8FA17F4e5CEa2d21D21554B",
    "ERC1155GatewayAddr": "0xb94f7F6ABcb811c5Ac709dE14E37590fcCd975B6",
    "USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
    "LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
    "DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
    "ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
    "GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
    "MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
  },
  "L2": {
    "confirmation": 0,
    "endpoint": "https://rpc.scroll.io",
    "blockTime": 3,
    "fetchLimit": 100,
    "MessengerAddr": "0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC",
    "ETHGatewayAddr": "0x6EA73e05AdC79974B931123675ea8F78FfdacDF0",
    "WETHGatewayAddr": "0x7003E7B7186f0E6601203b99F7B8DECBfA391cf9",
    "StandardERC20GatewayAddr": "0xE2b4795039517653c5Ae8C2A9BFdd783b48f447A",
    "CustomERC20GatewayAddr": "0x64CCBE37c9A82D85A1F2E74649b7A42923067988",
    "ERC721GatewayAddr": "0x7bC08E1c04fb41d75F1410363F0c5746Eae80582",
    "ERC1155GatewayAddr": "0x62597Cc19703aF10B58feF87B0d5D29eFE263bcc",
    "USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
    "LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
    "DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
    "GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79"
  },
  "db": {
    "dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",
    "driverName": "postgres",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  },
  "redis": {
    "address": "localhost:6379",
    "username": "default",
    "password": "",
    "local": true,
    "minIdleConns": 10,
    "readTimeoutMs": 500
  }
}
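For reference, a configuration file of this shape would typically be decoded into Go structs with `encoding/json`, which is the same pattern the old `config.NewConfig` (shown further below) used. The sketch here is illustrative only: it covers just the `db` and `redis` blocks, and the struct and field names are assumptions mirroring the JSON keys, not the repository's actual `internal/config` types.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Illustrative types only; the real internal/config package defines its own structs.
type RedisConfig struct {
	Address       string `json:"address"`
	Username      string `json:"username"`
	Password      string `json:"password"`
	Local         bool   `json:"local"`
	MinIdleConns  int    `json:"minIdleConns"`
	ReadTimeoutMs int    `json:"readTimeoutMs"`
}

type DBConfig struct {
	DSN        string `json:"dsn"`
	DriverName string `json:"driverName"`
	MaxOpenNum int    `json:"maxOpenNum"`
	MaxIdleNum int    `json:"maxIdleNum"`
}

type Config struct {
	DB    DBConfig    `json:"db"`
	Redis RedisConfig `json:"redis"`
}

func main() {
	// Read and decode a JSON config file of the shape shown above.
	buf, err := os.ReadFile("bridge-history-api/conf/config.json")
	if err != nil {
		panic(err)
	}
	var cfg Config
	if err := json.Unmarshal(buf, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.DB.DriverName, cfg.Redis.Address)
}
```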
@@ -1,41 +0,0 @@
{
  "batchInfoFetcher": {
    "batchIndexStartBlock": 9091265,
    "ScrollChainAddr": "0xcD00DB804C819175B381b2B44Aa16A391c8a01D6"
  },
  "l1": {
    "confirmation": 64,
    "endpoint": "https://rpc.ankr.com/eth_goerli",
    "startHeight": 9090194,
    "blockTime": 10,
    "MessengerAddr": "0x326517Eb8eB1Ce5eaB5b513C2e9A24839b402d90",
    "ETHGatewayAddr": "0x8305cB7B8448677736095965B63d7431017328fe",
    "WETHGatewayAddr": "0xe3bA3c60d99a2d9a5f817734bC85353470b23931",
    "StandardERC20Gateway": "0x16c1079B27eD9c363B7D08aC5Ae937A398972A5C",
    "CustomERC20GatewayAddr": "0x61f08caD3d6F77801167d3bA8669433701586643",
    "ERC721GatewayAddr": "0x4A73D25A4C99CB912acaf6C5B5e554f2982201c5",
    "ERC1155GatewayAddr": "0xa3F5DD3033698c2832C53f3C3Fe6E062F58cD808"
  },
  "l2": {
    "confirmation": 1,
    "endpoint": "http://staging-l2geth-rpc0.scroll.tech:8545",
    "blockTime": 3,
    "startHeight": 0,
    "CustomERC20GatewayAddr": "0x905db21f836749fEeD12de781afc4A5Ab4Dd0d51",
    "ERC721GatewayAddr": "0xC53D835514780664BCd7eCfcE7c2E5d9554dc41B",
    "StandardERC20Gateway": "0x90271634BCB020e06ea4840C3f7aa61b8F860651",
    "MessengerAddr": "0xE8b0956Ac75c65Aa1669e83888DA13afF2E108f4",
    "ETHGatewayAddr": "0xD5938590D5dD8ce95812D4D515a219C12C551D67",
    "WETHGatewayAddr": "0xb0aaA582564fade4232a16fdB1383004A6A7247F",
    "ERC1155GatewayAddr": "0x4f33B1655619c2C0B7C450128Df760B4365Cb549"
  },
  "db": {
    "dsn": "postgres://postgres:1234@localhost:5444/test?sslmode=disable",
    "driverName": "postgres",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  },
  "server": {
    "hostPort": "20006"
  }
}
@@ -1,71 +0,0 @@
package config

import (
	"encoding/json"
	"os"
	"path/filepath"
)

// BatchInfoFetcherConfig is the configuration of BatchInfoFetcher
type BatchInfoFetcherConfig struct {
	BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"`
	ScrollChainAddr      string `json:"ScrollChainAddr"`
}

// DBConfig db config
type DBConfig struct {
	// data source name
	DSN        string `json:"dsn"`
	DriverName string `json:"driverName"`

	MaxOpenNum int `json:"maxOpenNum"`
	MaxIdleNum int `json:"maxIdleNum"`
}

// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct {
	Confirmation           uint64 `json:"confirmation"`
	Endpoint               string `json:"endpoint"`
	StartHeight            uint64 `json:"startHeight"`
	BlockTime              int64  `json:"blockTime"`
	MessengerAddr           string `json:"MessengerAddr"`
	ETHGatewayAddr          string `json:"ETHGatewayAddr"`
	WETHGatewayAddr         string `json:"WETHGatewayAddr"`
	StandardERC20Gateway    string `json:"StandardERC20Gateway"`
	ERC721GatewayAddr       string `json:"ERC721GatewayAddr"`
	ERC1155GatewayAddr      string `json:"ERC1155GatewayAddr"`
	CustomERC20GatewayAddr  string `json:"CustomERC20GatewayAddr"`
}

// ServerConfig is the configuration of the bridge history backend server port
type ServerConfig struct {
	HostPort string `json:"hostPort"`
}

// Config is the configuration of the bridge history backend
type Config struct {
	// chain config
	L1 *LayerConfig `json:"l1"`
	L2 *LayerConfig `json:"l2"`

	// data source name
	DB               *DBConfig               `json:"db"`
	Server           *ServerConfig           `json:"server"`
	BatchInfoFetcher *BatchInfoFetcherConfig `json:"batchInfoFetcher"`
}

// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
	buf, err := os.ReadFile(filepath.Clean(file))
	if err != nil {
		return nil, err
	}

	cfg := &Config{}
	err = json.Unmarshal(buf, cfg)
	if err != nil {
		return nil, err
	}

	return cfg, nil
}
@@ -1,116 +0,0 @@
|
||||
package crossmsg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/crossmsg/messageproof"
|
||||
"bridge-history-api/orm"
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
// BatchInfoFetcher fetches batch info from l1 chain and update db
|
||||
type BatchInfoFetcher struct {
|
||||
ctx context.Context
|
||||
scrollChainAddr common.Address
|
||||
batchInfoStartNumber uint64
|
||||
confirmation uint64
|
||||
blockTimeInSec int
|
||||
client *ethclient.Client
|
||||
db *gorm.DB
|
||||
rollupOrm *orm.RollupBatch
|
||||
msgProofUpdater *messageproof.MsgProofUpdater
|
||||
}
|
||||
|
||||
// NewBatchInfoFetcher creates a new BatchInfoFetcher instance
|
||||
func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db *gorm.DB, msgProofUpdater *messageproof.MsgProofUpdater) *BatchInfoFetcher {
|
||||
return &BatchInfoFetcher{
|
||||
ctx: ctx,
|
||||
scrollChainAddr: scrollChainAddr,
|
||||
batchInfoStartNumber: batchInfoStartNumber,
|
||||
confirmation: confirmation,
|
||||
blockTimeInSec: blockTimeInSec,
|
||||
client: client,
|
||||
db: db,
|
||||
rollupOrm: orm.NewRollupBatch(db),
|
||||
msgProofUpdater: msgProofUpdater,
|
||||
}
|
||||
}
|
||||
|
||||
// Start the BatchInfoFetcher
|
||||
func (b *BatchInfoFetcher) Start() {
|
||||
log.Info("BatchInfoFetcher Start")
|
||||
// Fetch batch info at beginning
|
||||
// Then start msg proof updater after db have some bridge batch
|
||||
err := b.fetchBatchInfo()
|
||||
if err != nil {
|
||||
log.Error("fetch batch info at beginning failed: ", "err", err)
|
||||
}
|
||||
|
||||
go b.msgProofUpdater.Start()
|
||||
|
||||
go func() {
|
||||
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
|
||||
for {
|
||||
select {
|
||||
case <-b.ctx.Done():
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
err := b.fetchBatchInfo()
|
||||
if err != nil {
|
||||
log.Error("fetch batch info failed: ", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop the BatchInfoFetcher and call msg proof updater to stop
|
||||
func (b *BatchInfoFetcher) Stop() {
|
||||
log.Info("BatchInfoFetcher Stop")
|
||||
b.msgProofUpdater.Stop()
|
||||
}
|
||||
|
||||
func (b *BatchInfoFetcher) fetchBatchInfo() error {
|
||||
number, err := utils.GetSafeBlockNumber(b.ctx, b.client, b.confirmation)
|
||||
if err != nil {
|
||||
log.Error("Can not get latest block number: ", "err", err)
|
||||
return err
|
||||
}
|
||||
latestBatchHeight, err := b.rollupOrm.GetLatestRollupBatchProcessedHeight(b.ctx)
|
||||
if err != nil {
|
||||
log.Error("Can not get latest BatchInfo: ", "err", err)
|
||||
return err
|
||||
}
|
||||
var startHeight uint64
|
||||
if latestBatchHeight == 0 {
|
||||
log.Info("no batch record in database, start from batchInfoStartNumber", "batchInfoStartNumber", b.batchInfoStartNumber)
|
||||
startHeight = b.batchInfoStartNumber
|
||||
} else {
|
||||
startHeight = latestBatchHeight + 1
|
||||
}
|
||||
if startHeight < b.batchInfoStartNumber {
|
||||
startHeight = b.batchInfoStartNumber
|
||||
}
|
||||
for from := startHeight; number >= from; from += fetchLimit {
|
||||
to := from + fetchLimit - 1
|
||||
// number - confirmation can never be less than 0 given the for-loop condition,
// but watch out for overflow
if to > number {
|
||||
to = number
|
||||
}
|
||||
// filter logs to fetch batches
|
||||
err = FetchAndSaveBatchIndex(b.ctx, b.client, b.db, int64(from), int64(to), b.scrollChainAddr)
|
||||
if err != nil {
|
||||
log.Error("Can not fetch and save from chain: ", "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
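As a rough wiring sketch for the (now removed) fetcher above: the RPC endpoint, Postgres DSN, contract address and numeric parameters below are placeholders, not values taken from this repository.

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"

	"bridge-history-api/crossmsg"
	"bridge-history-api/crossmsg/messageproof"
)

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	// Placeholder L1 endpoint and Postgres DSN.
	client, err := ethclient.Dial("https://l1-rpc.example.org")
	if err != nil {
		log.Fatalf("dial L1: %v", err)
	}
	db, err := gorm.Open(postgres.Open("postgres://user:pass@localhost:5432/bridge_history"), &gorm.Config{})
	if err != nil {
		log.Fatalf("open db: %v", err)
	}

	// Confirmation depth (6), start block (1) and block time (12s) are illustrative values.
	proofUpdater := messageproof.NewMsgProofUpdater(ctx, 6, 1, db)
	fetcher := crossmsg.NewBatchInfoFetcher(ctx, common.HexToAddress("0x0000000000000000000000000000000000000000"), 1, 6, 12, client, db, proofUpdater)
	fetcher.Start()
	defer fetcher.Stop()

	<-ctx.Done() // run until interrupted
}
```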
@@ -1,85 +0,0 @@
|
||||
package crossmsg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// GetEarliestNoBlockTimestampHeightFunc is a function type that gets the earliest record without a block timestamp from the database
type GetEarliestNoBlockTimestampHeightFunc func(ctx context.Context) (uint64, error)
|
||||
|
||||
// UpdateBlockTimestampFunc is a function type that updates the block timestamp in the database
type UpdateBlockTimestampFunc func(ctx context.Context, height uint64, timestamp time.Time) error
|
||||
|
||||
// BlockTimestampFetcher fetches block timestamps from the blockchain and saves them to the database
type BlockTimestampFetcher struct {
|
||||
ctx context.Context
|
||||
confirmation uint64
|
||||
blockTimeInSec int
|
||||
client *ethclient.Client
|
||||
updateBlockTimestampFunc UpdateBlockTimestampFunc
|
||||
getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc
|
||||
}
|
||||
|
||||
// NewBlockTimestampFetcher creates a new BlockTimestampFetcher instance
|
||||
func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher {
|
||||
return &BlockTimestampFetcher{
|
||||
ctx: ctx,
|
||||
confirmation: confirmation,
|
||||
blockTimeInSec: blockTimeInSec,
|
||||
client: client,
|
||||
getEarliestNoBlockTimestampHeightFunc: getEarliestNoBlockTimestampHeightFunc,
|
||||
updateBlockTimestampFunc: updateBlockTimestampFunc,
|
||||
}
|
||||
}
|
||||
|
||||
// Start the BlockTimestampFetcher
|
||||
func (b *BlockTimestampFetcher) Start() {
|
||||
go func() {
|
||||
tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
|
||||
for {
|
||||
select {
|
||||
case <-b.ctx.Done():
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
number, err := b.client.BlockNumber(b.ctx)
|
||||
if err != nil {
|
||||
log.Error("Can not get latest block number", "err", err)
|
||||
continue
|
||||
}
|
||||
startHeight, err := b.getEarliestNoBlockTimestampHeightFunc(b.ctx)
|
||||
if err != nil {
|
||||
log.Error("Can not get latest record without block timestamp", "err", err)
|
||||
continue
|
||||
}
|
||||
for height := startHeight; number >= height+b.confirmation && height > 0; {
|
||||
block, err := b.client.HeaderByNumber(b.ctx, new(big.Int).SetUint64(height))
|
||||
if err != nil {
|
||||
log.Error("Can not get block by number", "err", err)
|
||||
break
|
||||
}
|
||||
err = b.updateBlockTimestampFunc(b.ctx, height, time.Unix(int64(block.Time), 0))
|
||||
if err != nil {
|
||||
log.Error("Can not update blockTimestamp into DB ", "err", err)
|
||||
break
|
||||
}
|
||||
height, err = b.getEarliestNoBlockTimestampHeightFunc(b.ctx)
|
||||
if err != nil {
|
||||
log.Error("Can not get latest record without block timestamp", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop the BlockTimestampFetcher and log the info
|
||||
func (b *BlockTimestampFetcher) Stop() {
|
||||
log.Info("BlockTimestampFetcher Stop")
|
||||
}
|
||||
@@ -1,215 +0,0 @@
|
||||
package crossmsg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/modern-go/reflect2"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/config"
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
// MsgFetcher fetches cross message events from blockchain and saves them to database
|
||||
type MsgFetcher struct {
|
||||
ctx context.Context
|
||||
config *config.LayerConfig
|
||||
db *gorm.DB
|
||||
client *ethclient.Client
|
||||
worker *FetchEventWorker
|
||||
reorgHandling ReorgHandling
|
||||
addressList []common.Address
|
||||
cachedHeaders []*types.Header
|
||||
mu sync.Mutex
|
||||
reorgStartCh chan struct{}
|
||||
reorgEndCh chan struct{}
|
||||
}
|
||||
|
||||
// NewMsgFetcher creates a new MsgFetcher instance
|
||||
func NewMsgFetcher(ctx context.Context, config *config.LayerConfig, db *gorm.DB, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*MsgFetcher, error) {
|
||||
msgFetcher := &MsgFetcher{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
db: db,
|
||||
client: client,
|
||||
worker: worker,
|
||||
reorgHandling: reorg,
|
||||
addressList: addressList,
|
||||
cachedHeaders: make([]*types.Header, 0),
|
||||
reorgStartCh: make(chan struct{}),
|
||||
reorgEndCh: make(chan struct{}),
|
||||
}
|
||||
return msgFetcher, nil
|
||||
}
|
||||
|
||||
// Start the MsgFetcher
|
||||
func (c *MsgFetcher) Start() {
|
||||
log.Info("MsgFetcher Start")
|
||||
// fetch missing events from finalized blocks; we don't handle reorgs here
c.forwardFetchAndSaveMissingEvents(c.config.Confirmation)
|
||||
|
||||
tick := time.NewTicker(time.Duration(c.config.BlockTime) * time.Second)
|
||||
headerTick := time.NewTicker(time.Duration(c.config.BlockTime/2) * time.Second)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-c.reorgStartCh:
|
||||
// create timeout here
|
||||
timeout := time.NewTicker(300 * time.Second)
|
||||
select {
|
||||
case <-c.reorgEndCh:
|
||||
log.Info("Reorg finished")
|
||||
timeout.Stop()
|
||||
case <-timeout.C:
|
||||
// TODO: need to notify the on-call members to handle reorg manually
|
||||
timeout.Stop()
|
||||
log.Crit("Reorg timeout")
|
||||
}
|
||||
case <-c.ctx.Done():
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
c.mu.Lock()
|
||||
c.forwardFetchAndSaveMissingEvents(1)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
headerTick.Stop()
|
||||
return
|
||||
case <-headerTick.C:
|
||||
c.fetchMissingLatestHeaders()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop the MsgFetcher and log the info
|
||||
func (c *MsgFetcher) Stop() {
|
||||
log.Info("MsgFetcher Stop")
|
||||
}
|
||||
|
||||
// forwardFetchAndSaveMissingEvents will fetch all events from the latest processed height to the latest block number.
|
||||
func (c *MsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
|
||||
// when fetching up to the latest block, do not go past the cached headers
var number uint64
|
||||
var err error
|
||||
if len(c.cachedHeaders) != 0 && confirmation == 0 {
|
||||
number = c.cachedHeaders[len(c.cachedHeaders)-1].Number.Uint64() - 1
|
||||
} else {
|
||||
number, err = utils.GetSafeBlockNumber(c.ctx, c.client, confirmation)
|
||||
if err != nil {
|
||||
log.Error(fmt.Sprintf("%s: can not get the safe block number", c.worker.Name), "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
if reflect2.IsNil(c.worker.G) || reflect2.IsNil(c.worker.F) {
|
||||
log.Error(fmt.Sprintf("%s: invalid get/fetch function", c.worker.Name))
|
||||
return
|
||||
}
|
||||
processedHeight, err := c.worker.G(c.ctx, c.db)
|
||||
if err != nil {
|
||||
log.Error(fmt.Sprintf("%s: can not get latest processed block height", c.worker.Name))
|
||||
}
|
||||
log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processedHeight)
|
||||
if processedHeight <= 0 || processedHeight < c.config.StartHeight {
|
||||
processedHeight = c.config.StartHeight
|
||||
} else {
|
||||
processedHeight++
|
||||
}
|
||||
for from := processedHeight; from <= number; from += fetchLimit {
|
||||
to := from + fetchLimit - 1
|
||||
if to > number {
|
||||
to = number
|
||||
}
|
||||
// watch for overflow here, though it's unlikely to happen
err := c.worker.F(c.ctx, c.client, c.db, int64(from), int64(to), c.addressList)
|
||||
if err != nil {
|
||||
log.Error(fmt.Sprintf("%s: failed!", c.worker.Name), "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *MsgFetcher) fetchMissingLatestHeaders() {
|
||||
var start int64
|
||||
number, err := c.client.BlockNumber(c.ctx)
|
||||
if err != nil {
|
||||
log.Error("fetchMissingLatestHeaders(): can not get the latest block number", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(c.cachedHeaders) > 0 {
|
||||
start = c.cachedHeaders[len(c.cachedHeaders)-1].Number.Int64() + 1
|
||||
} else {
|
||||
start = int64(number - c.config.Confirmation)
|
||||
}
|
||||
for i := start; i <= int64(number); i++ {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
close(c.reorgStartCh)
|
||||
close(c.reorgEndCh)
|
||||
return
|
||||
default:
|
||||
header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(i))
|
||||
if err != nil {
|
||||
log.Error("failed to get latest header", "err", err)
|
||||
return
|
||||
}
|
||||
if len(c.cachedHeaders) == 0 {
|
||||
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, []*types.Header{header}, int(c.config.Confirmation))
|
||||
return
|
||||
}
|
||||
// check if the fetched header is a child of the last cached header
if IsParentAndChild(c.cachedHeaders[len(c.cachedHeaders)-1], header) {
|
||||
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, []*types.Header{header}, int(c.config.Confirmation))
|
||||
log.Debug("fetched block into cache", "height", header.Number, "parent hash", header.ParentHash.Hex(), "block hash", c.cachedHeaders[len(c.cachedHeaders)-1].Hash().Hex(), "len", len(c.cachedHeaders))
|
||||
continue
|
||||
}
|
||||
// reorg happened
|
||||
log.Warn("Reorg happened", "height", header.Number, "parent hash", header.ParentHash.Hex(), "last cached hash", c.cachedHeaders[len(c.cachedHeaders)-1].Hash().Hex(), "last cached height", c.cachedHeaders[len(c.cachedHeaders)-1].Number)
|
||||
c.reorgStartCh <- struct{}{}
|
||||
// wait here if a fetcher is running
c.mu.Lock()
|
||||
index, ok, validHeaders := BackwardFindReorgBlock(c.ctx, c.cachedHeaders, c.client, header)
|
||||
if !ok {
|
||||
log.Error("Reorg happened too earlier than cached headers", "reorg height", header.Number)
|
||||
num, getSafeErr := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
|
||||
if getSafeErr != nil {
|
||||
log.Crit("Can not get safe number during reorg, quit the process", "err", err)
|
||||
}
|
||||
// clear all our saved data, because no data is safe now
|
||||
err = c.reorgHandling(c.ctx, num, c.db)
|
||||
// if handling succeeded then we can update the cachedHeaders
if err == nil {
|
||||
c.cachedHeaders = c.cachedHeaders[:0]
|
||||
}
|
||||
c.mu.Unlock()
|
||||
c.reorgEndCh <- struct{}{}
|
||||
return
|
||||
}
|
||||
err = c.reorgHandling(c.ctx, c.cachedHeaders[index].Number.Uint64(), c.db)
|
||||
// if handling succeeded then we can update the cachedHeaders
if err == nil {
|
||||
c.cachedHeaders = c.cachedHeaders[:index+1]
|
||||
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, validHeaders, int(c.config.Confirmation))
|
||||
}
|
||||
c.mu.Unlock()
|
||||
c.reorgEndCh <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,213 +0,0 @@
|
||||
package crossmsg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
geth "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
backendabi "bridge-history-api/abi"
|
||||
"bridge-history-api/orm"
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
// TODO: read from config
var (
|
||||
// the number of blocks fetched per round
fetchLimit = uint64(3000)
|
||||
)
|
||||
|
||||
// FetchAndSave is a function type that fetches events from blockchain and saves them to database
|
||||
type FetchAndSave func(ctx context.Context, client *ethclient.Client, database *gorm.DB, from int64, to int64, addressList []common.Address) error
|
||||
|
||||
// GetLatestProcessed is a function type that gets the latest processed block height from database
|
||||
type GetLatestProcessed func(ctx context.Context, db *gorm.DB) (uint64, error)
|
||||
|
||||
// FetchEventWorker defines worker with fetch and save function, processed number getter, and name
|
||||
type FetchEventWorker struct {
|
||||
F FetchAndSave
|
||||
G GetLatestProcessed
|
||||
Name string
|
||||
}
|
||||
|
||||
// GetLatestL1ProcessedHeight gets the latest processed height on L1
func GetLatestL1ProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
|
||||
l1CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
crossHeight, err := l1CrossMsgOrm.GetLatestL1ProcessedHeight(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 cross message processed height: ", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
relayedHeight, err := relayedOrm.GetLatestRelayedHeightOnL1(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 relayed message processed height: ", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
if crossHeight > relayedHeight {
|
||||
return crossHeight, nil
|
||||
}
|
||||
return relayedHeight, nil
|
||||
}
|
||||
|
||||
// GetLatestL2ProcessedHeight gets the latest processed height on L2
func GetLatestL2ProcessedHeight(ctx context.Context, db *gorm.DB) (uint64, error) {
|
||||
l2CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(db)
|
||||
crossHeight, err := l2CrossMsgOrm.GetLatestL2ProcessedHeight(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 cross message processed height", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
relayedHeight, err := relayedOrm.GetLatestRelayedHeightOnL2(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 relayed message processed height", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
l2SentHeight, err := l2SentMsgOrm.GetLatestSentMsgHeightOnL2(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 sent message processed height", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
maxHeight := crossHeight
|
||||
if maxHeight < relayedHeight {
|
||||
maxHeight = relayedHeight
|
||||
}
|
||||
if maxHeight < l2SentHeight {
|
||||
maxHeight = l2SentHeight
|
||||
}
|
||||
return maxHeight, nil
|
||||
}
|
||||
|
||||
// L1FetchAndSaveEvents fetches and saves events on L1
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, addrList []common.Address) error {
|
||||
l1CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
query := geth.FilterQuery{
|
||||
FromBlock: big.NewInt(from), // inclusive
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
Addresses: addrList,
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
query.Topics[0] = make([]common.Hash, 7)
|
||||
query.Topics[0][0] = backendabi.L1DepositETHSig
|
||||
query.Topics[0][1] = backendabi.L1DepositERC20Sig
|
||||
query.Topics[0][2] = backendabi.L1RelayedMessageEventSignature
|
||||
query.Topics[0][3] = backendabi.L1SentMessageEventSignature
|
||||
query.Topics[0][4] = backendabi.L1DepositERC721Sig
|
||||
query.Topics[0][5] = backendabi.L1DepositERC1155Sig
|
||||
query.Topics[0][6] = backendabi.L1DepositWETHSig
|
||||
|
||||
logs, err := client.FilterLogs(ctx, query)
|
||||
if err != nil {
|
||||
log.Warn("Failed to get l1 event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
depositL1CrossMsgs, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
|
||||
if err != nil {
|
||||
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
err = db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := l1CrossMsgOrm.InsertL1CrossMsg(ctx, depositL1CrossMsgs, tx); txErr != nil {
|
||||
log.Error("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
if txErr := relayedOrm.InsertRelayedMsg(ctx, relayedMsg, tx); txErr != nil {
|
||||
log.Error("l1FetchAndSaveEvents: Failed to insert relayed msg event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to finish transaction", "err", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// L2FetchAndSaveEvents fetches and saves events on L2
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, addrList []common.Address) error {
|
||||
l2CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(db)
|
||||
query := geth.FilterQuery{
|
||||
FromBlock: big.NewInt(from), // inclusive
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
Addresses: addrList,
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
query.Topics[0] = make([]common.Hash, 7)
|
||||
query.Topics[0][0] = backendabi.L2WithdrawETHSig
|
||||
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
|
||||
query.Topics[0][2] = backendabi.L2RelayedMessageEventSignature
|
||||
query.Topics[0][3] = backendabi.L2SentMessageEventSignature
|
||||
query.Topics[0][4] = backendabi.L2WithdrawERC721Sig
|
||||
query.Topics[0][5] = backendabi.L2WithdrawERC1155Sig
|
||||
query.Topics[0][6] = backendabi.L2WithdrawWETHSig
|
||||
|
||||
logs, err := client.FilterLogs(ctx, query)
|
||||
if err != nil {
|
||||
log.Warn("Failed to get l2 event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
depositL2CrossMsgs, relayedMsg, l2SentMsgs, err := utils.ParseBackendL2EventLogs(logs)
|
||||
if err != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := l2CrossMsgOrm.InsertL2CrossMsg(ctx, depositL2CrossMsgs, tx); txErr != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := relayedOrm.InsertRelayedMsg(ctx, relayedMsg, tx); txErr != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := l2SentMsgOrm.InsertL2SentMsg(ctx, l2SentMsgs, tx); txErr != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// FetchAndSaveBatchIndex fetches and saves batch indices
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, db *gorm.DB, from int64, to int64, scrollChainAddr common.Address) error {
|
||||
rollupBatchOrm := orm.NewRollupBatch(db)
|
||||
query := geth.FilterQuery{
|
||||
FromBlock: big.NewInt(from), // inclusive
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
Addresses: []common.Address{scrollChainAddr},
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
query.Topics[0] = make([]common.Hash, 1)
|
||||
query.Topics[0][0] = backendabi.L1CommitBatchEventSignature
|
||||
logs, err := client.FilterLogs(ctx, query)
|
||||
if err != nil {
|
||||
log.Warn("Failed to get batch commit event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
rollupBatches, err := utils.ParseBatchInfoFromScrollChain(ctx, client, logs)
|
||||
if err != nil {
|
||||
log.Error("FetchAndSaveBatchIndex: Failed to parse batch commit msg event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
if txErr := rollupBatchOrm.InsertRollupBatch(ctx, rollupBatches); txErr != nil {
|
||||
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,249 +0,0 @@
|
||||
package messageproof
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/orm"
|
||||
)
|
||||
|
||||
// MsgProofUpdater is used to update message proofs in the db
type MsgProofUpdater struct {
|
||||
ctx context.Context
|
||||
db *gorm.DB
|
||||
l2SentMsgOrm *orm.L2SentMsg
|
||||
rollupOrm *orm.RollupBatch
|
||||
withdrawTrie *WithdrawTrie
|
||||
}
|
||||
|
||||
// NewMsgProofUpdater creates a new MsgProofUpdater instance
func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db *gorm.DB) *MsgProofUpdater {
|
||||
return &MsgProofUpdater{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
l2SentMsgOrm: orm.NewL2SentMsg(db),
|
||||
rollupOrm: orm.NewRollupBatch(db),
|
||||
withdrawTrie: NewWithdrawTrie(),
|
||||
}
|
||||
}
|
||||
|
||||
// Start the MsgProofUpdater
|
||||
func (m *MsgProofUpdater) Start() {
|
||||
log.Info("MsgProofUpdater Start")
|
||||
m.initialize(m.ctx)
|
||||
go func() {
|
||||
tick := time.NewTicker(10 * time.Second)
|
||||
for {
|
||||
select {
|
||||
case <-m.ctx.Done():
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
latestBatch, err := m.rollupOrm.GetLatestRollupBatch(m.ctx)
|
||||
if err != nil {
|
||||
log.Warn("MsgProofUpdater: Can not get latest RollupBatch: ", "err", err)
|
||||
continue
|
||||
}
|
||||
if latestBatch == nil {
|
||||
continue
|
||||
}
|
||||
latestBatchIndexWithProof, err := m.l2SentMsgOrm.GetLatestL2SentMsgBatchIndex(m.ctx)
|
||||
if err != nil {
|
||||
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
|
||||
continue
|
||||
}
|
||||
log.Info("latest batch with proof", "batch_index", latestBatchIndexWithProof)
|
||||
var start uint64
|
||||
if latestBatchIndexWithProof < 0 {
|
||||
start = 1
|
||||
} else {
|
||||
start = uint64(latestBatchIndexWithProof) + 1
|
||||
}
|
||||
for i := start; i <= latestBatch.BatchIndex; i++ {
|
||||
batch, err := m.rollupOrm.GetRollupBatchByIndex(m.ctx, i)
|
||||
if err != nil {
|
||||
log.Error("MsgProofUpdater: Can not get RollupBatch: ", "err", err, "index", i)
|
||||
break
|
||||
}
|
||||
// get all l2 messages in this batch
|
||||
msgs, proofs, err := m.appendL2Messages(batch.StartBlockNumber, batch.EndBlockNumber)
|
||||
if err != nil {
|
||||
log.Error("MsgProofUpdater: can not append l2messages", "startBlockNumber", batch.StartBlockNumber, "endBlockNumber", batch.EndBlockNumber, "err", err)
|
||||
break
|
||||
}
|
||||
// here we update batch withdraw root
|
||||
err = m.rollupOrm.UpdateRollupBatchWithdrawRoot(m.ctx, batch.BatchIndex, m.withdrawTrie.MessageRoot().Hex())
|
||||
if err != nil {
|
||||
// if this fails, it is better to restart the binary
log.Error("MsgProofUpdater: can not update batch withdraw root", "err", err)
|
||||
break
|
||||
}
|
||||
err = m.updateMsgProof(msgs, proofs, batch.BatchIndex)
|
||||
if err != nil {
|
||||
// if this fails, it is better to restart the binary
log.Error("MsgProofUpdater: can not update msg proof", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
// Stop the MsgProofUpdater
|
||||
func (m *MsgProofUpdater) Stop() {
|
||||
log.Info("MsgProofUpdater Stop")
|
||||
}
|
||||
|
||||
func (m *MsgProofUpdater) initialize(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
err := m.initializeWithdrawTrie()
|
||||
if err != nil {
|
||||
log.Error("can not initialize withdraw trie", "err", err)
|
||||
// give it some time to retry
|
||||
time.Sleep(10 * time.Second)
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MsgProofUpdater) initializeWithdrawTrie() error {
|
||||
var batch *orm.RollupBatch
|
||||
firstMsg, err := m.l2SentMsgOrm.GetL2SentMessageByNonce(m.ctx, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get first l2 message: %v", err)
|
||||
}
|
||||
// no l2 message yet
// TODO: check whether we really have no l2 sent message with nonce 0
if firstMsg == nil {
|
||||
log.Info("No first l2sentmsg in db")
|
||||
return nil
|
||||
}
|
||||
|
||||
// if no batch, return and wait for next try round
|
||||
batch, err = m.rollupOrm.GetLatestRollupBatch(m.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest batch: %v", err)
|
||||
}
|
||||
if batch == nil {
|
||||
return fmt.Errorf("no batch found")
|
||||
}
|
||||
|
||||
var batches []*orm.RollupBatch
|
||||
batchIndex := batch.BatchIndex
|
||||
for {
|
||||
var msg *orm.L2SentMsg
|
||||
msg, err = m.l2SentMsgOrm.GetLatestL2SentMsgLEHeight(m.ctx, batch.EndBlockNumber)
|
||||
if err != nil {
|
||||
log.Warn("failed to get l2 sent message less than height", "endBlocknum", batch.EndBlockNumber, "err", err)
|
||||
}
|
||||
if msg != nil && msg.MsgProof != "" {
|
||||
log.Info("Found latest l2 sent msg with proof: ", "msg_proof", msg.MsgProof, "height", msg.Height, "msg_hash", msg.MsgHash)
|
||||
// initialize withdrawTrie
|
||||
proofBytes := common.Hex2Bytes(msg.MsgProof)
|
||||
m.withdrawTrie.Initialize(msg.Nonce, common.HexToHash(msg.MsgHash), proofBytes)
|
||||
break
|
||||
}
|
||||
|
||||
// append unprocessed batch
|
||||
batches = append(batches, batch)
|
||||
|
||||
if batchIndex == 1 {
|
||||
// otherwise the decrement below would overflow,
// and batch index 0 is not in the DB
// TODO: check whether a batch with index 0 can exist in the future
break
|
||||
}
|
||||
// iterate for next batch
|
||||
batchIndex--
|
||||
|
||||
batch, err = m.rollupOrm.GetRollupBatchByIndex(m.ctx, batchIndex)
|
||||
if err != nil || batch == nil {
|
||||
return fmt.Errorf("failed to get block batch %v: %v", batchIndex, err)
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Build withdraw trie with pending messages")
|
||||
for i := len(batches) - 1; i >= 0; i-- {
|
||||
b := batches[i]
|
||||
msgs, proofs, err := m.appendL2Messages(b.StartBlockNumber, b.EndBlockNumber)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = m.rollupOrm.UpdateRollupBatchWithdrawRoot(m.ctx, b.BatchIndex, m.withdrawTrie.MessageRoot().Hex())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = m.updateMsgProof(msgs, proofs, b.BatchIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
log.Info("Build withdraw trie finished")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte, batchIndex uint64) error {
|
||||
if len(msgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
// this should not happen, but double check
|
||||
if len(msgs) != len(proofs) {
|
||||
return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
|
||||
}
|
||||
err := m.db.Transaction(func(tx *gorm.DB) error {
|
||||
for i, msg := range msgs {
|
||||
proofHex := common.Bytes2Hex(proofs[i])
|
||||
log.Debug("updateMsgProof", "msgHash", msg.MsgHash, "batchIndex", batchIndex, "proof", proofHex)
|
||||
if err := m.l2SentMsgOrm.UpdateL2MessageProof(m.ctx, msg.MsgHash, proofHex, batchIndex, tx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// appendL2Messages will append all messages between firstBlock and lastBlock (both inclusive) to withdrawTrie and compute corresponding merkle proof of each message.
|
||||
func (m *MsgProofUpdater) appendL2Messages(firstBlock, lastBlock uint64) ([]*orm.L2SentMsg, [][]byte, error) {
|
||||
var msgProofs [][]byte
|
||||
messages, err := m.l2SentMsgOrm.GetL2SentMsgMsgHashByHeightRange(m.ctx, firstBlock, lastBlock)
|
||||
if err != nil {
|
||||
log.Error("GetL2SentMsgMsgHashByHeightRange failed", "error", err, "firstBlock", firstBlock, "lastBlock", lastBlock)
|
||||
return messages, msgProofs, err
|
||||
}
|
||||
if len(messages) == 0 {
|
||||
return messages, msgProofs, nil
|
||||
}
|
||||
|
||||
// double check whether nonce is matched
|
||||
if messages[0].Nonce != m.withdrawTrie.NextMessageNonce {
|
||||
log.Error("L2 message nonce mismatch", "expected", m.withdrawTrie.NextMessageNonce, "found", messages[0].Nonce)
|
||||
return messages, msgProofs, fmt.Errorf("l2 message nonce mismatch, expected: %v, found: %v", m.withdrawTrie.NextMessageNonce, messages[0].Nonce)
|
||||
}
|
||||
|
||||
var hashes []common.Hash
|
||||
for _, msg := range messages {
|
||||
hashes = append(hashes, common.HexToHash(msg.MsgHash))
|
||||
}
|
||||
msgProofs = m.withdrawTrie.AppendMessages(hashes)
|
||||
|
||||
return messages, msgProofs, nil
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
package crossmsg_test
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"bridge-history-api/crossmsg"
|
||||
)
|
||||
|
||||
func TestMergeIntoList(t *testing.T) {
|
||||
headers, err := generateHeaders(64)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, headers[0].Hash(), headers[1].ParentHash)
|
||||
headers2, err := generateHeaders(18)
|
||||
assert.NoError(t, err)
|
||||
result := crossmsg.MergeAddIntoHeaderList(headers, headers2, 64)
|
||||
assert.Equal(t, 64, len(result))
|
||||
assert.Equal(t, headers2[len(headers2)-1], result[len(result)-1])
|
||||
assert.NotEqual(t, headers[0], result[0])
|
||||
}
|
||||
|
||||
func generateHeaders(amount int) ([]*types.Header, error) {
|
||||
headers := make([]*types.Header, amount)
|
||||
|
||||
for i := 0; i < amount; i++ {
|
||||
var parentHash common.Hash
|
||||
if i > 0 {
|
||||
parentHash = headers[i-1].Hash()
|
||||
}
|
||||
nonce, err := rand.Int(rand.Reader, big.NewInt(1<<63-1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
difficulty := big.NewInt(131072)
|
||||
|
||||
header := &types.Header{
|
||||
ParentHash: parentHash,
|
||||
UncleHash: types.EmptyUncleHash,
|
||||
Coinbase: common.Address{},
|
||||
Root: common.Hash{},
|
||||
TxHash: types.EmptyRootHash,
|
||||
ReceiptHash: types.EmptyRootHash,
|
||||
Bloom: types.Bloom{},
|
||||
Difficulty: difficulty,
|
||||
Number: big.NewInt(int64(i)),
|
||||
GasLimit: 5000000,
|
||||
GasUsed: 0,
|
||||
Time: uint64(i * 15),
|
||||
Extra: []byte{},
|
||||
MixDigest: common.Hash{},
|
||||
Nonce: types.EncodeNonce(nonce.Uint64()),
|
||||
}
|
||||
headers[i] = header
|
||||
}
|
||||
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
// TODO: add more test cases
|
||||
// func TestReorg(t *testing.T)
|
||||
@@ -1,108 +0,0 @@
|
||||
package crossmsg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/orm"
|
||||
)
|
||||
|
||||
// ReorgHandling is the function type used to handle a reorg
type ReorgHandling func(ctx context.Context, reorgHeight uint64, db *gorm.DB) error
|
||||
|
||||
func reverseArray(arr []*types.Header) []*types.Header {
|
||||
for i := 0; i < len(arr)/2; i++ {
|
||||
j := len(arr) - i - 1
|
||||
arr[i], arr[j] = arr[j], arr[i]
|
||||
}
|
||||
return arr
|
||||
}
|
||||
|
||||
// IsParentAndChild checks whether the child header's ParentHash matches the parent header's Hash
func IsParentAndChild(parentHeader *types.Header, header *types.Header) bool {
|
||||
return header.ParentHash == parentHeader.Hash()
|
||||
}
|
||||
|
||||
// MergeAddIntoHeaderList merges two header lists; if the result exceeds the max length, the oldest entries are dropped
func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []*types.Header {
|
||||
mergedArr := append(baseArr, extraArr...)
|
||||
if len(mergedArr) <= maxLength {
|
||||
return mergedArr
|
||||
}
|
||||
|
||||
startIndex := len(mergedArr) - maxLength
|
||||
return mergedArr[startIndex:]
|
||||
}
|
||||
|
||||
// BackwardFindReorgBlock finds the reorg block by backward search
|
||||
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, lastHeader *types.Header) (int, bool, []*types.Header) {
|
||||
maxStep := len(headers)
|
||||
backwardHeaderList := []*types.Header{lastHeader}
|
||||
for iterRound := 0; iterRound < maxStep; iterRound++ {
|
||||
header, err := client.HeaderByHash(ctx, lastHeader.ParentHash)
|
||||
if err != nil {
|
||||
log.Error("BackwardFindReorgBlock failed", "error", err)
|
||||
return -1, false, nil
|
||||
}
|
||||
backwardHeaderList = append(backwardHeaderList, header)
|
||||
for j := len(headers) - 1; j >= 0; j-- {
|
||||
if IsParentAndChild(headers[j], header) {
|
||||
backwardHeaderList = reverseArray(backwardHeaderList)
|
||||
return j, true, backwardHeaderList
|
||||
}
|
||||
}
|
||||
lastHeader = header
|
||||
}
|
||||
return -1, false, nil
|
||||
}
|
||||
|
||||
// L1ReorgHandling handles l1 reorg
|
||||
func L1ReorgHandling(ctx context.Context, reorgHeight uint64, db *gorm.DB) error {
|
||||
l1CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
err := db.Transaction(func(tx *gorm.DB) error {
|
||||
if err := l1CrossMsgOrm.DeleteL1CrossMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l1 cross msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
if err := relayedOrm.DeleteL1RelayedHashAfterHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l1 relayed msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("l1 reorg handling failed", "err", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// L2ReorgHandling handles l2 reorg
|
||||
func L2ReorgHandling(ctx context.Context, reorgHeight uint64, db *gorm.DB) error {
|
||||
l2CrossMsgOrm := orm.NewCrossMsg(db)
|
||||
relayedOrm := orm.NewRelayedMsg(db)
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(db)
|
||||
err := db.Transaction(func(tx *gorm.DB) error {
|
||||
if err := l2CrossMsgOrm.DeleteL2CrossMsgFromHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l2 cross msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
if err := relayedOrm.DeleteL2RelayedHashAfterHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l2 relayed msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
if err := l2SentMsgOrm.DeleteL2SentMsgAfterHeight(ctx, reorgHeight, tx); err != nil {
|
||||
log.Error("delete l2 sent msg from height", "height", reorgHeight, "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("l2 reorg handling failed", "err", err)
|
||||
}
|
||||
return err
|
||||
}
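To illustrate how the reorg handlers above plug into the message fetcher defined earlier in this diff, here is a hedged sketch; the worker name is illustrative, and using the messenger address as the only filter address is an assumption, not the repository's actual wiring.

```go
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"gorm.io/gorm"

	"bridge-history-api/config"
	"bridge-history-api/crossmsg"
)

// startL1Fetcher wires L1FetchAndSaveEvents, GetLatestL1ProcessedHeight and
// L1ReorgHandling into a MsgFetcher and starts it.
func startL1Fetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) (*crossmsg.MsgFetcher, error) {
	worker := &crossmsg.FetchEventWorker{
		F:    crossmsg.L1FetchAndSaveEvents,
		G:    crossmsg.GetLatestL1ProcessedHeight,
		Name: "l1 cross message fetcher", // illustrative name
	}
	// Assumption: filter only on the messenger contract address from the layer config.
	addrs := []common.Address{common.HexToAddress(cfg.MessengerAddr)}
	fetcher, err := crossmsg.NewMsgFetcher(ctx, cfg, db, client, worker, addrs, crossmsg.L1ReorgHandling)
	if err != nil {
		return nil, err
	}
	fetcher.Start()
	return fetcher, nil
}
```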
@@ -1,132 +1,92 @@
|
||||
module bridge-history-api
|
||||
module scroll-tech/bridge-history-api
|
||||
|
||||
go 1.19
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/ethereum/go-ethereum v1.12.2
|
||||
github.com/gin-contrib/cors v1.4.0
|
||||
github.com/gin-contrib/cors v1.5.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/mattn/go-isatty v0.0.19
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/pressly/goose/v3 v3.7.0
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/go-redis/redis/v8 v8.11.5
|
||||
github.com/google/uuid v1.4.0
|
||||
github.com/pressly/goose/v3 v3.16.0
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
gorm.io/driver/postgres v1.5.0
|
||||
gorm.io/gorm v1.25.2
|
||||
golang.org/x/sync v0.5.0
|
||||
gorm.io/gorm v1.25.5
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/DataDog/zstd v1.5.2 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.7.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
|
||||
github.com/bytedance/sonic v1.9.2 // indirect
|
||||
github.com/bytedance/sonic v1.10.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/cockroachdb/errors v1.9.1 // indirect
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
|
||||
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
|
||||
github.com/cockroachdb/redact v1.1.3 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.10.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
|
||||
github.com/chenzhuoyu/iasm v0.9.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/deepmap/oapi-codegen v1.8.2 // indirect
|
||||
github.com/docker/docker v23.0.6+incompatible // indirect
|
||||
github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/getsentry/sentry-go v0.18.0 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.15.5 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/graph-gophers/graphql-go v1.3.0 // indirect
|
||||
github.com/hashicorp/go-bexpr v0.1.10 // indirect
|
||||
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/holiman/uint256 v1.2.3 // indirect
|
||||
github.com/huin/goupnp v1.0.3 // indirect
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
|
||||
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
github.com/jackc/pgx/v5 v5.3.1 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.16.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/onsi/gomega v1.27.1 // indirect
|
||||
github.com/opentracing/opentracing-go v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.39.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.7.1 // indirect
|
||||
github.com/sethvargo/go-retry v0.2.4 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/supranational/blst v0.3.11 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
||||
github.com/tklauser/numcpus v0.6.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/arch v0.4.0 // indirect
|
||||
golang.org/x/crypto v0.12.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect
|
||||
golang.org/x/net v0.14.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/sys v0.11.0 // indirect
|
||||
golang.org/x/text v0.12.0 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/arch v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.16.0 // indirect
|
||||
golang.org/x/net v0.18.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.11.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
|
||||
)
|
||||
|
||||
@@ -1,29 +1,17 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
|
||||
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
|
||||
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
|
||||
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
|
||||
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
|
||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0=
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
|
||||
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
|
||||
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
@@ -32,219 +20,104 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
|
||||
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
|
||||
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
|
||||
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
|
||||
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
|
||||
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||
github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
|
||||
github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
|
||||
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk=
|
||||
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM=
|
||||
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
|
||||
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.10.0 h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA=
|
||||
github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
|
||||
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
|
||||
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
|
||||
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
|
||||
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
|
||||
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
|
||||
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
|
||||
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg=
github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/go-ethereum v1.12.2 h1:eGHJ4ij7oyVqUQn48LBz3B7pvQ8sV0wGJiIE6gDq/6Y=
github.com/ethereum/go-ethereum v1.12.2/go.mod h1:1cRAEV+rp/xX0zraSCBnu9Py3HQ+geRMj3HdR+k0wfI=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T/Lao=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k=
github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs=
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU=
github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -252,454 +125,205 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg=
github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c h1:MnAdt80steCDli4SAD0J0spBGNY+gQvbdptNjWztHcw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c/go.mod h1:4HrFcoStbViFVy/9l/rvKl1XmizVAaPdgqI8v0U8hOc=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dzWP1Lu+A40W883dK/Mr3xyDSM/2MggS8GtHT0qgAnE=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU=
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
|
||||
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
||||
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
|
||||
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
|
||||
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
|
||||
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
|
||||
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
|
||||
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
|
||||
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
|
||||
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
|
||||
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
|
||||
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
|
||||
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
|
||||
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
|
||||
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
|
||||
modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
|
||||
modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
|
||||
modernc.org/libc v1.32.0 h1:yXatHTrACp3WaKNRCoZwUK7qj5V8ep1XyY0ka4oYcNc=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
|
||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
|
||||
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
|
||||
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
|
||||
modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8=
|
||||
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||
66 bridge-history-api/internal/config/config.go Normal file
@@ -0,0 +1,66 @@
package config

import (
    "encoding/json"
    "os"
    "path/filepath"

    "scroll-tech/common/database"
)

// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct {
    Confirmation             uint64 `json:"confirmation"`
    Endpoint                 string `json:"endpoint"`
    StartHeight              uint64 `json:"startHeight"` // Must be set to the contract deployment height; otherwise, in the current implementation, the message proof cannot be updated successfully.
    BlockTime                int64  `json:"blockTime"`
    FetchLimit               uint64 `json:"fetchLimit"`
    MessengerAddr            string `json:"MessengerAddr"`
    ETHGatewayAddr           string `json:"ETHGatewayAddr"`
    StandardERC20GatewayAddr string `json:"StandardERC20GatewayAddr"`
    CustomERC20GatewayAddr   string `json:"CustomERC20GatewayAddr"`
    WETHGatewayAddr          string `json:"WETHGatewayAddr"`
    DAIGatewayAddr           string `json:"DAIGatewayAddr"`
    USDCGatewayAddr          string `json:"USDCGatewayAddr"`
    LIDOGatewayAddr          string `json:"LIDOGatewayAddr"`
    ERC721GatewayAddr        string `json:"ERC721GatewayAddr"`
    ERC1155GatewayAddr       string `json:"ERC1155GatewayAddr"`
    ScrollChainAddr          string `json:"ScrollChainAddr"`
    GatewayRouterAddr        string `json:"GatewayRouterAddr"`
    MessageQueueAddr         string `json:"MessageQueueAddr"`
}

// RedisConfig is the redis configuration
type RedisConfig struct {
    Address       string `json:"address"`
    Username      string `json:"username"`
    Password      string `json:"password"`
    DB            int    `json:"db"`
    Local         bool   `json:"local"`
    MinIdleConns  int    `json:"minIdleConns"`
    ReadTimeoutMs int    `json:"readTimeoutMs"`
}

// Config is the configuration of the bridge history backend
type Config struct {
    L1    *LayerConfig     `json:"L1"`
    L2    *LayerConfig     `json:"L2"`
    DB    *database.Config `json:"db"`
    Redis *RedisConfig     `json:"redis"`
}

// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
    buf, err := os.ReadFile(filepath.Clean(file))
    if err != nil {
        return nil, err
    }

    cfg := &Config{}
    err = json.Unmarshal(buf, cfg)
    if err != nil {
        return nil, err
    }

    return cfg, nil
}
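For reference, a minimal sketch of how this config package might be exercised is shown below. It is illustrative only: the endpoint URLs, heights and file name are made-up placeholders, and the db/redis sections are omitted (so cfg.DB and cfg.Redis stay nil); the JSON keys follow the struct tags above, including the mixed casing of the address fields.

package main

import (
    "fmt"
    "log"
    "os"

    "scroll-tech/bridge-history-api/internal/config"
)

func main() {
    // Hypothetical config file; values are placeholders, not real endpoints or heights.
    raw := []byte(`{
  "L1": {"confirmation": 6, "endpoint": "https://example-l1-rpc.invalid", "startHeight": 100000, "blockTime": 12, "fetchLimit": 500},
  "L2": {"confirmation": 1, "endpoint": "https://example-l2-rpc.invalid", "startHeight": 1, "blockTime": 3, "fetchLimit": 500}
}`)
    if err := os.WriteFile("config.json", raw, 0o600); err != nil {
        log.Fatal(err)
    }

    cfg, err := config.NewConfig("config.json")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("L1 fetch limit:", cfg.L1.FetchLimit, "L2 block time:", cfg.L2.BlockTime)
}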
22 bridge-history-api/internal/controller/api/controller.go Normal file
@@ -0,0 +1,22 @@
package api

import (
    "sync"

    "github.com/go-redis/redis/v8"
    "gorm.io/gorm"
)

var (
    // HistoryCtrler is the controller instance
    HistoryCtrler *HistoryController

    initControllerOnce sync.Once
)

// InitController initializes the controller with the database handle and redis client
func InitController(db *gorm.DB, redis *redis.Client) {
    initControllerOnce.Do(func() {
        HistoryCtrler = NewHistoryController(db, redis)
    })
}
@@ -0,0 +1,94 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/logic"
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// HistoryController contains the query claimable txs service
|
||||
type HistoryController struct {
|
||||
historyLogic *logic.HistoryLogic
|
||||
}
|
||||
|
||||
// NewHistoryController returns a HistoryController instance
|
||||
func NewHistoryController(db *gorm.DB, redis *redis.Client) *HistoryController {
|
||||
return &HistoryController{
|
||||
historyLogic: logic.NewHistoryLogic(db, redis),
|
||||
}
|
||||
}
|
||||
|
||||
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
|
||||
func (c *HistoryController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
|
||||
// GetL2WithdrawalsByAddress defines the http get method behavior
|
||||
func (c *HistoryController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
|
||||
// GetTxsByAddress defines the http get method behavior
|
||||
func (c *HistoryController) GetTxsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetTxsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
|
||||
// PostQueryTxsByHashes defines the http post method behavior
|
||||
func (c *HistoryController) PostQueryTxsByHashes(ctx *gin.Context) {
|
||||
var req types.QueryByHashRequest
|
||||
if err := ctx.ShouldBindJSON(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
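The handlers above are ordinary gin handlers, so exposing them over HTTP only requires calling InitController and then registering routes. The sketch below shows one possible wiring; the route paths and group prefix are assumptions for illustration and are not taken from this diff.

package route

import (
    "github.com/gin-gonic/gin"
    "github.com/go-redis/redis/v8"
    "gorm.io/gorm"

    "scroll-tech/bridge-history-api/internal/controller/api"
)

// Register wires the history controller into a gin engine.
// The paths below are illustrative, not the project's actual routes.
func Register(r *gin.Engine, db *gorm.DB, rdb *redis.Client) {
    api.InitController(db, rdb) // sync.Once makes repeated calls harmless

    g := r.Group("/api")
    g.GET("/txs", api.HistoryCtrler.GetTxsByAddress)
    g.GET("/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
    g.GET("/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
    g.POST("/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
}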
@@ -1,37 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/internal/logic"
|
||||
"bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// BatchController contains the query claimable txs service
|
||||
type BatchController struct {
|
||||
batchLogic *logic.BatchLogic
|
||||
}
|
||||
|
||||
// NewBatchController return NewBatchController instance
|
||||
func NewBatchController(db *gorm.DB) *BatchController {
|
||||
return &BatchController{
|
||||
batchLogic: logic.NewBatchLogic(db),
|
||||
}
|
||||
}
|
||||
|
||||
// GetWithdrawRootByBatchIndex defines the http get method behavior
|
||||
func (b *BatchController) GetWithdrawRootByBatchIndex(ctx *gin.Context) {
|
||||
var req types.QueryByBatchIndexRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
result, err := b.batchLogic.GetWithdrawRootByBatchIndex(ctx, req.BatchIndex)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetWithdrawRootByBatchIndexFailure, err)
|
||||
return
|
||||
}
|
||||
|
||||
types.RenderSuccess(ctx, result)
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
var (
|
||||
// HistoryCtrler is controller instance
|
||||
HistoryCtrler *HistoryController
|
||||
// BatchCtrler is controller instance
|
||||
BatchCtrler *BatchController
|
||||
// HealthCheck the health check controller
|
||||
HealthCheck *HealthCheckController
|
||||
// Ready the ready controller
|
||||
Ready *ReadyController
|
||||
|
||||
initControllerOnce sync.Once
|
||||
)
|
||||
|
||||
// InitController inits Controller with database
|
||||
func InitController(db *gorm.DB) {
|
||||
initControllerOnce.Do(func() {
|
||||
HistoryCtrler = NewHistoryController(db)
|
||||
BatchCtrler = NewBatchController(db)
|
||||
HealthCheck = NewHealthCheckController(db)
|
||||
Ready = NewReadyController()
|
||||
})
|
||||
}
|
||||
153 bridge-history-api/internal/controller/fetcher/l1_fetcher.go Normal file
@@ -0,0 +1,153 @@
|
||||
package fetcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/logic"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L1MessageFetcher fetches cross message events from L1 and saves them to database.
|
||||
type L1MessageFetcher struct {
|
||||
ctx context.Context
|
||||
cfg *config.LayerConfig
|
||||
client *ethclient.Client
|
||||
|
||||
l1SyncHeight uint64
|
||||
l1LastSyncBlockHash common.Hash
|
||||
|
||||
eventUpdateLogic *logic.EventUpdateLogic
|
||||
l1FetcherLogic *logic.L1FetcherLogic
|
||||
|
||||
l1MessageFetcherRunningTotal prometheus.Counter
|
||||
l1MessageFetcherReorgTotal prometheus.Counter
|
||||
l1MessageFetcherSyncHeight prometheus.Gauge
|
||||
}
|
||||
|
||||
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
|
||||
func NewL1MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
|
||||
c := &L1MessageFetcher{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
|
||||
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
c.l1MessageFetcherRunningTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "L1_message_fetcher_running_total",
|
||||
Help: "Current count of running L1 message fetcher instances.",
|
||||
})
|
||||
c.l1MessageFetcherReorgTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "L1_message_fetcher_reorg_total",
|
||||
Help: "Total count of blockchain reorgs encountered by the L1 message fetcher.",
|
||||
})
|
||||
c.l1MessageFetcherSyncHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "L1_message_fetcher_sync_height",
|
||||
Help: "Latest blockchain height the L1 message fetcher has synced with.",
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Start starts the L1 message fetching process.
|
||||
func (c *L1MessageFetcher) Start() {
|
||||
messageSyncedHeight, batchSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
|
||||
if dbErr != nil {
|
||||
log.Crit("L1MessageFetcher start failed", "err", dbErr)
|
||||
}
|
||||
|
||||
l1SyncHeight := messageSyncedHeight
|
||||
if batchSyncedHeight > l1SyncHeight {
|
||||
l1SyncHeight = batchSyncedHeight
|
||||
}
|
||||
if c.cfg.StartHeight > l1SyncHeight {
|
||||
l1SyncHeight = c.cfg.StartHeight - 1
|
||||
}
|
||||
|
||||
// Sync from an older block to prevent reorg during restart.
|
||||
if l1SyncHeight < logic.L1ReorgSafeDepth {
|
||||
l1SyncHeight = 0
|
||||
} else {
|
||||
l1SyncHeight -= logic.L1ReorgSafeDepth
|
||||
}
|
||||
|
||||
header, err := c.client.HeaderByNumber(c.ctx, new(big.Int).SetUint64(l1SyncHeight))
|
||||
if err != nil {
|
||||
log.Crit("failed to get L1 header by number", "block number", l1SyncHeight, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.updateL1SyncHeight(l1SyncHeight, header.Hash())
|
||||
|
||||
log.Info("Start L1 message fetcher", "message synced height", messageSyncedHeight, "batch synced height", batchSyncedHeight, "config start height", c.cfg.StartHeight, "sync start height", c.l1SyncHeight+1)
|
||||
|
||||
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
c.fetchAndSaveEvents(c.cfg.Confirmation)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
c.l1MessageFetcherRunningTotal.Inc()
|
||||
startHeight := c.l1SyncHeight + 1
|
||||
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
|
||||
if rpcErr != nil {
|
||||
log.Error("failed to get L1 block number", "confirmation", confirmation, "err", rpcErr)
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("fetch and save missing L1 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
|
||||
|
||||
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
|
||||
to := from + c.cfg.FetchLimit - 1
|
||||
if to > endHeight {
|
||||
to = endHeight
|
||||
}
|
||||
|
||||
isReorg, resyncHeight, lastBlockHash, l1FetcherResult, fetcherErr := c.l1FetcherLogic.L1Fetcher(c.ctx, from, to, c.l1LastSyncBlockHash)
|
||||
if fetcherErr != nil {
|
||||
log.Error("failed to fetch L1 events", "from", from, "to", to, "err", fetcherErr)
|
||||
return
|
||||
}
|
||||
|
||||
if isReorg {
|
||||
c.l1MessageFetcherReorgTotal.Inc()
|
||||
log.Warn("L1 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
|
||||
c.updateL1SyncHeight(resyncHeight, lastBlockHash)
|
||||
return
|
||||
}
|
||||
|
||||
if insertUpdateErr := c.eventUpdateLogic.L1InsertOrUpdate(c.ctx, l1FetcherResult); insertUpdateErr != nil {
|
||||
log.Error("failed to save L1 events", "from", from, "to", to, "err", insertUpdateErr)
|
||||
return
|
||||
}
|
||||
|
||||
c.updateL1SyncHeight(to, lastBlockHash)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *L1MessageFetcher) updateL1SyncHeight(height uint64, blockHash common.Hash) {
|
||||
c.l1MessageFetcherSyncHeight.Set(float64(height))
|
||||
c.l1LastSyncBlockHash = blockHash
|
||||
c.l1SyncHeight = height
|
||||
}
|
||||
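Two small pieces of arithmetic drive the fetcher above: Start() resumes from the larger of the message/batch synced heights (or StartHeight-1 if that is higher) and then rewinds by L1ReorgSafeDepth, while fetchAndSaveEvents walks the remaining range in FetchLimit-sized windows. The sketch below restates just that arithmetic in isolation; the reorg depth constant is an assumed value for illustration, not taken from the repository.

package main

import "fmt"

const reorgSafeDepth = 64 // stands in for logic.L1ReorgSafeDepth; value assumed for illustration

// resumeHeight mirrors the logic in L1MessageFetcher.Start: take the furthest
// synced height (or StartHeight-1 if larger), then rewind by the safe depth.
func resumeHeight(messageSynced, batchSynced, startHeight uint64) uint64 {
    h := messageSynced
    if batchSynced > h {
        h = batchSynced
    }
    if startHeight > h {
        h = startHeight - 1
    }
    if h < reorgSafeDepth {
        return 0
    }
    return h - reorgSafeDepth
}

// chunks reproduces the [from, to] windows used by fetchAndSaveEvents.
func chunks(syncHeight, endHeight, fetchLimit uint64) [][2]uint64 {
    var out [][2]uint64
    for from := syncHeight + 1; from <= endHeight; from += fetchLimit {
        to := from + fetchLimit - 1
        if to > endHeight {
            to = endHeight
        }
        out = append(out, [2]uint64{from, to})
    }
    return out
}

func main() {
    fmt.Println(resumeHeight(1200, 1180, 1000)) // 1136 with the assumed depth
    fmt.Println(chunks(1136, 1500, 100))        // [1137 1236] [1237 1336] [1337 1436] [1437 1500]
}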
152 bridge-history-api/internal/controller/fetcher/l2_fetcher.go Normal file
@@ -0,0 +1,152 @@
|
||||
package fetcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/logic"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L2MessageFetcher fetches cross message events from L2 and saves them to database.
|
||||
type L2MessageFetcher struct {
|
||||
ctx context.Context
|
||||
cfg *config.LayerConfig
|
||||
db *gorm.DB
|
||||
client *ethclient.Client
|
||||
l2SyncHeight uint64
|
||||
l2LastSyncBlockHash common.Hash
|
||||
|
||||
eventUpdateLogic *logic.EventUpdateLogic
|
||||
l2FetcherLogic *logic.L2FetcherLogic
|
||||
|
||||
l2MessageFetcherRunningTotal prometheus.Counter
|
||||
l2MessageFetcherReorgTotal prometheus.Counter
|
||||
l2MessageFetcherSyncHeight prometheus.Gauge
|
||||
}
|
||||
|
||||
// NewL2MessageFetcher creates a new L2MessageFetcher instance.
|
||||
func NewL2MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
|
||||
c := &L2MessageFetcher{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
db: db,
|
||||
client: client,
|
||||
eventUpdateLogic: logic.NewEventUpdateLogic(db, false),
|
||||
l2FetcherLogic: logic.NewL2FetcherLogic(cfg, db, client),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
c.l2MessageFetcherRunningTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "L2_message_fetcher_running_total",
|
||||
Help: "Current count of running L2 message fetcher instances.",
|
||||
})
|
||||
c.l2MessageFetcherReorgTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "L2_message_fetcher_reorg_total",
|
||||
Help: "Total count of blockchain reorgs encountered by the L2 message fetcher.",
|
||||
})
|
||||
c.l2MessageFetcherSyncHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "L2_message_fetcher_sync_height",
|
||||
Help: "Latest blockchain height the L2 message fetcher has synced with.",
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Start starts the L2 message fetching process.
|
||||
func (c *L2MessageFetcher) Start() {
|
||||
l2SentMessageSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
|
||||
if dbErr != nil {
|
||||
log.Crit("failed to get L2 cross message processed height", "err", dbErr)
|
||||
return
|
||||
}
|
||||
|
||||
l2SyncHeight := l2SentMessageSyncedHeight
|
||||
// Sync from an older block to prevent reorg during restart.
|
||||
if l2SyncHeight < logic.L2ReorgSafeDepth {
|
||||
l2SyncHeight = 0
|
||||
} else {
|
||||
l2SyncHeight -= logic.L2ReorgSafeDepth
|
||||
}
|
||||
|
||||
header, err := c.client.HeaderByNumber(c.ctx, new(big.Int).SetUint64(l2SyncHeight))
|
||||
if err != nil {
|
||||
log.Crit("failed to get L2 header by number", "block number", l2SyncHeight, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.updateL2SyncHeight(l2SyncHeight, header.Hash())
|
||||
|
||||
log.Info("Start L2 message fetcher", "message synced height", l2SentMessageSyncedHeight, "sync start height", l2SyncHeight+1)
|
||||
|
||||
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
c.fetchAndSaveEvents(c.cfg.Confirmation)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
startHeight := c.l2SyncHeight + 1
|
||||
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
|
||||
if rpcErr != nil {
|
||||
log.Error("failed to get L2 block number", "confirmation", confirmation, "err", rpcErr)
|
||||
return
|
||||
}
|
||||
log.Info("fetch and save missing L2 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
|
||||
c.l2MessageFetcherRunningTotal.Inc()
|
||||
|
||||
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
|
||||
to := from + c.cfg.FetchLimit - 1
|
||||
if to > endHeight {
|
||||
to = endHeight
|
||||
}
|
||||
|
||||
isReorg, resyncHeight, lastBlockHash, l2FetcherResult, fetcherErr := c.l2FetcherLogic.L2Fetcher(c.ctx, from, to, c.l2LastSyncBlockHash)
|
||||
if fetcherErr != nil {
|
||||
log.Error("failed to fetch L2 events", "from", from, "to", to, "err", fetcherErr)
|
||||
return
|
||||
}
|
||||
|
||||
if isReorg {
|
||||
c.l2MessageFetcherReorgTotal.Inc()
|
||||
log.Warn("L2 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
|
||||
c.updateL2SyncHeight(resyncHeight, lastBlockHash)
|
||||
return
|
||||
}
|
||||
|
||||
if insertUpdateErr := c.eventUpdateLogic.L2InsertOrUpdate(c.ctx, l2FetcherResult); insertUpdateErr != nil {
|
||||
log.Error("failed to save L2 events", "from", from, "to", to, "err", insertUpdateErr)
|
||||
return
|
||||
}
|
||||
|
||||
if updateErr := c.eventUpdateLogic.UpdateL1BatchIndexAndStatus(c.ctx, c.l2SyncHeight); updateErr != nil {
|
||||
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
|
||||
return
|
||||
}
|
||||
|
||||
c.updateL2SyncHeight(to, lastBlockHash)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *L2MessageFetcher) updateL2SyncHeight(height uint64, blockHash common.Hash) {
|
||||
c.l2MessageFetcherSyncHeight.Set(float64(height))
|
||||
c.l2LastSyncBlockHash = blockHash
|
||||
c.l2SyncHeight = height
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/internal/types"
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
// HealthCheckController is health check API
|
||||
type HealthCheckController struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
// NewHealthCheckController returns an HealthCheckController instance
|
||||
func NewHealthCheckController(db *gorm.DB) *HealthCheckController {
|
||||
return &HealthCheckController{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// HealthCheck the api controller for coordinator health check
|
||||
func (a *HealthCheckController) HealthCheck(c *gin.Context) {
|
||||
if _, err := utils.Ping(a.db); err != nil {
|
||||
types.RenderFatal(c, err)
|
||||
return
|
||||
}
|
||||
types.RenderSuccess(c, nil)
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/gin-gonic/gin"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/internal/logic"
|
||||
"bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// HistoryController contains the query claimable txs service
|
||||
type HistoryController struct {
|
||||
historyLogic *logic.HistoryLogic
|
||||
}
|
||||
|
||||
// NewHistoryController return HistoryController instance
|
||||
func NewHistoryController(db *gorm.DB) *HistoryController {
|
||||
return &HistoryController{
|
||||
historyLogic: logic.NewHistoryLogic(db),
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllClaimableTxsByAddr defines the http get method behavior
|
||||
func (c *HistoryController) GetAllClaimableTxsByAddr(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
offset := (req.Page - 1) * req.PageSize
|
||||
limit := req.PageSize
|
||||
txs, total, err := c.historyLogic.GetClaimableTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetClaimablesFailure, err)
|
||||
return
|
||||
}
|
||||
|
||||
types.RenderSuccess(ctx, &types.ResultData{Result: txs, Total: total})
|
||||
}
|
||||
|
||||
// GetAllTxsByAddr defines the http get method behavior
|
||||
func (c *HistoryController) GetAllTxsByAddr(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
|
||||
return
|
||||
}
|
||||
offset := (req.Page - 1) * req.PageSize
|
||||
limit := req.PageSize
|
||||
message, total, err := c.historyLogic.GetTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetTxsByAddrFailure, err)
|
||||
return
|
||||
}
|
||||
types.RenderSuccess(ctx, &types.ResultData{Result: message, Total: total})
|
||||
}
|
||||
|
||||
// PostQueryTxsByHash defines the http post method behavior
|
||||
func (c *HistoryController) PostQueryTxsByHash(ctx *gin.Context) {
|
||||
var req types.QueryByHashRequest
|
||||
if err := ctx.ShouldBindJSON(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
result, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetTxsByHashFailure, err)
|
||||
return
|
||||
}
|
||||
types.RenderSuccess(ctx, &types.ResultData{Result: result, Total: 0})
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
"bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// ReadyController ready API
|
||||
type ReadyController struct {
|
||||
}
|
||||
|
||||
// NewReadyController returns an ReadyController instance
|
||||
func NewReadyController() *ReadyController {
|
||||
return &ReadyController{}
|
||||
}
|
||||
|
||||
// Ready the api controller for coordinator ready
|
||||
func (r *ReadyController) Ready(c *gin.Context) {
|
||||
types.RenderSuccess(c, nil)
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/orm"
|
||||
)
|
||||
|
||||
// BatchLogic example service.
|
||||
type BatchLogic struct {
|
||||
rollupOrm *orm.RollupBatch
|
||||
}
|
||||
|
||||
// NewBatchLogic returns services backed with a "db"
|
||||
func NewBatchLogic(db *gorm.DB) *BatchLogic {
|
||||
logic := &BatchLogic{rollupOrm: orm.NewRollupBatch(db)}
|
||||
return logic
|
||||
}
|
||||
|
||||
// GetWithdrawRootByBatchIndex get withdraw root by batch index from db
|
||||
func (b *BatchLogic) GetWithdrawRootByBatchIndex(ctx context.Context, batchIndex uint64) (string, error) {
|
||||
batch, err := b.rollupOrm.GetRollupBatchByIndex(ctx, batchIndex)
|
||||
if err != nil {
|
||||
log.Debug("getWithdrawRootByBatchIndex failed", "error", err)
|
||||
return "", err
|
||||
}
|
||||
if batch == nil {
|
||||
log.Debug("getWithdrawRootByBatchIndex failed", "error", "batch not found")
|
||||
return "", nil
|
||||
}
|
||||
return batch.WithdrawRoot, nil
|
||||
}
|
||||
210 bridge-history-api/internal/logic/event_update.go Normal file
@@ -0,0 +1,210 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// EventUpdateLogic the logic of insert/update the database
|
||||
type EventUpdateLogic struct {
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
batchEventOrm *orm.BatchEvent
|
||||
|
||||
eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight prometheus.Gauge
|
||||
eventUpdateLogicL2MessageNonceUpdateHeight prometheus.Gauge
|
||||
}
|
||||
|
||||
// NewEventUpdateLogic creates a EventUpdateLogic instance
|
||||
func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
|
||||
b := &EventUpdateLogic{
|
||||
db: db,
|
||||
crossMessageOrm: orm.NewCrossMessage(db),
|
||||
batchEventOrm: orm.NewBatchEvent(db),
|
||||
}
|
||||
|
||||
if !isL1 {
|
||||
reg := prometheus.DefaultRegisterer
|
||||
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "event_update_logic_L1_finalize_batch_event_L2_block_update_height",
|
||||
Help: "L2 block height of the latest L1 batch event that has been finalized and updated in the message_table.",
|
||||
})
|
||||
b.eventUpdateLogicL2MessageNonceUpdateHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "event_update_logic_L2_message_nonce_update_height",
|
||||
Help: "L2 message nonce height in the latest L1 batch event that has been finalized and updated in the message_table.",
|
||||
})
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// GetL1SyncHeight gets the l1 sync height from db
|
||||
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, error) {
|
||||
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL1SentMessage)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 cross message synced height", "error", err)
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
batchSyncedHeight, err := b.batchEventOrm.GetBatchEventSyncedHeightInDB(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 batch event synced height", "error", err)
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
return messageSyncedHeight, batchSyncedHeight, nil
|
||||
}
|
||||
|
||||
// GetL2MessageSyncedHeightInDB gets L2 messages synced height
|
||||
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, error) {
|
||||
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL2SentMessage)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 cross message processed height", "err", err)
|
||||
return 0, err
|
||||
}
|
||||
return l2SentMessageSyncedHeight, nil
|
||||
}
|
||||
|
||||
// L1InsertOrUpdate inserts or updates l1 messages
|
||||
func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult *L1FilterResult) error {
|
||||
err := b.db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 deposit messages", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages, tx); txErr != nil {
|
||||
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents, tx); txErr != nil {
|
||||
log.Error("failed to insert or update batch events", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 message queue events", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l1FetcherResult.RevertedTxs, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 failed gateway router transactions", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Error("failed to update db of L1 events", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, batchIndex, startBlock, endBlock uint64) error {
|
||||
l2WithdrawMessages, err := b.crossMessageOrm.GetL2WithdrawalsByBlockRange(ctx, startBlock, endBlock)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 withdrawals by batch index", "batch index", batchIndex, "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(l2WithdrawMessages) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
withdrawTrie := utils.NewWithdrawTrie()
|
||||
lastMessage, err := b.crossMessageOrm.GetL2LatestFinalizedWithdrawal(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest L2 finalized sent message event", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if lastMessage != nil {
|
||||
withdrawTrie.Initialize(lastMessage.MessageNonce, common.HexToHash(lastMessage.MessageHash), lastMessage.MerkleProof)
|
||||
}
|
||||
|
||||
if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce {
|
||||
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actuall next message nonce", l2WithdrawMessages[0].MessageNonce)
|
||||
return fmt.Errorf("nonce mismatch")
|
||||
}
|
||||
|
||||
messageHashes := make([]common.Hash, len(l2WithdrawMessages))
|
||||
for i, message := range l2WithdrawMessages {
|
||||
messageHashes[i] = common.HexToHash(message.MessageHash)
|
||||
}
|
||||
|
||||
proofs := withdrawTrie.AppendMessages(messageHashes)
|
||||
|
||||
for i, message := range l2WithdrawMessages {
|
||||
message.MerkleProof = proofs[i]
|
||||
message.RollupStatus = int(orm.RollupStatusTypeFinalized)
|
||||
message.BatchIndex = batchIndex
|
||||
}
|
||||
|
||||
if dbErr := b.crossMessageOrm.UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx, l2WithdrawMessages); dbErr != nil {
|
||||
log.Error("failed to update batch index and rollup status and merkle proof of L2 messages", "err", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
|
||||
b.eventUpdateLogicL2MessageNonceUpdateHeight.Set(float64(withdrawTrie.NextMessageNonce - 1))
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL1BatchIndexAndStatus updates L1 finalized batch index and status
|
||||
func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, height uint64) error {
|
||||
finalizedBatches, err := b.batchEventOrm.GetFinalizedBatchesLEBlockHeight(ctx, height)
|
||||
if err != nil {
|
||||
log.Error("failed to get batches >= block height", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, finalizedBatch := range finalizedBatches {
|
||||
log.Info("update finalized batch info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
|
||||
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, finalizedBatch.StartBlockNumber, finalizedBatch.EndBlockNumber); updateErr != nil {
|
||||
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
|
||||
return updateErr
|
||||
}
|
||||
if dbErr := b.batchEventOrm.UpdateBatchEventStatus(ctx, finalizedBatch.BatchIndex); dbErr != nil {
|
||||
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight.Set(float64(finalizedBatch.EndBlockNumber))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// L2InsertOrUpdate inserts or updates L2 messages
|
||||
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
|
||||
err := b.db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages, tx); txErr != nil {
|
||||
log.Error("failed to insert L2 withdrawal messages", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages, tx); txErr != nil {
|
||||
log.Error("failed to update L2 relayed messages of L1 deposits", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l2FetcherResult.OtherRevertedTxs, tx); txErr != nil {
|
||||
log.Error("failed to insert L2 failed gateway router transactions", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Error("failed to update db of L2 events", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
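The core of updateL2WithdrawMessageInfos above is the withdraw trie round-trip: restore the trie state from the latest finalized withdrawal, check nonce continuity, then append the new message hashes to get one Merkle proof per withdrawal. The condensed sketch below isolates that flow. It assumes the utils.WithdrawTrie API exactly as used above (NewWithdrawTrie, Initialize, NextMessageNonce, AppendMessages) and that proofs are byte slices; it is illustrative, not repository code.

package logic

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/common"

    "scroll-tech/bridge-history-api/internal/orm"
    "scroll-tech/bridge-history-api/internal/utils"
)

// buildWithdrawalProofs is a hypothetical helper mirroring the proof
// generation in updateL2WithdrawMessageInfos.
func buildWithdrawalProofs(last *orm.CrossMessage, withdrawals []*orm.CrossMessage) ([][]byte, error) {
    withdrawTrie := utils.NewWithdrawTrie()
    if last != nil {
        // Resume the trie from the latest finalized withdrawal stored in the database.
        withdrawTrie.Initialize(last.MessageNonce, common.HexToHash(last.MessageHash), last.MerkleProof)
    }
    if len(withdrawals) == 0 {
        return nil, nil
    }
    if withdrawTrie.NextMessageNonce != withdrawals[0].MessageNonce {
        return nil, fmt.Errorf("nonce mismatch: expected %d, got %d", withdrawTrie.NextMessageNonce, withdrawals[0].MessageNonce)
    }
    hashes := make([]common.Hash, len(withdrawals))
    for i, w := range withdrawals {
        hashes[i] = common.HexToHash(w.MessageHash)
    }
    // AppendMessages returns one proof per appended hash, in the same order.
    return withdrawTrie.AppendMessages(hashes), nil
}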
@@ -2,211 +2,406 @@ package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"golang.org/x/sync/singleflight"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"bridge-history-api/internal/types"
|
||||
"bridge-history-api/orm"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// HistoryLogic example service.
|
||||
const (
|
||||
// cacheKeyPrefixBridgeHistory serves as a specific namespace for all Redis cache keys
|
||||
// associated with the 'bridge-history' user. This prefix is used to enforce access controls
|
||||
// in Redis, allowing permissions to be set such that only users with the appropriate
|
||||
// access rights can read or write to keys starting with "bridge-history".
|
||||
cacheKeyPrefixBridgeHistory = "bridge-history-"
|
||||
|
||||
cacheKeyPrefixL2ClaimableWithdrawalsByAddr = cacheKeyPrefixBridgeHistory + "l2ClaimableWithdrawalsByAddr:"
|
||||
cacheKeyPrefixL2WithdrawalsByAddr = cacheKeyPrefixBridgeHistory + "l2WithdrawalsByAddr:"
|
||||
cacheKeyPrefixTxsByAddr = cacheKeyPrefixBridgeHistory + "txsByAddr:"
|
||||
cacheKeyPrefixQueryTxsByHashes = cacheKeyPrefixBridgeHistory + "queryTxsByHashes:"
|
||||
cacheKeyExpiredTime = 1 * time.Minute
|
||||
)
|
||||
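These constants namespace every Redis key under the bridge-history prefix and cap cache entries at one minute. A rough sketch of the resulting caching pattern follows; it is an assumption-level illustration (the helper name and JSON encoding are invented), it relies only on the go-redis and singleflight packages imported above, and it presumes it sits in the same package as these constants.

type cachedFetcher struct {
    rdb *redis.Client
    sf  singleflight.Group
}

// txsByAddr is a hypothetical helper: check Redis first, collapse concurrent
// misses for the same key with singleflight, then write back with the shared TTL.
func (c *cachedFetcher) txsByAddr(ctx context.Context, addr string, load func() (interface{}, error)) ([]byte, error) {
    key := cacheKeyPrefixTxsByAddr + addr
    if cached, err := c.rdb.Get(ctx, key).Bytes(); err == nil {
        return cached, nil // cache hit
    }
    // Cache miss (or a Redis error): fall back to loading from the database.
    v, err, _ := c.sf.Do(key, load)
    if err != nil {
        return nil, err
    }
    encoded, err := json.Marshal(v)
    if err != nil {
        return nil, err
    }
    // Best-effort write-back; a failure here only costs a future cache miss.
    c.rdb.Set(ctx, key, encoded, cacheKeyExpiredTime)
    return encoded, nil
}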
|
||||
// HistoryLogic services.
|
||||
type HistoryLogic struct {
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
batchEventOrm *orm.BatchEvent
|
||||
redis *redis.Client
|
||||
singleFlight singleflight.Group
|
||||
cacheMetrics *cacheMetrics
|
||||
}
|
||||
|
||||
// NewHistoryLogic returns services backed with a "db"
|
||||
func NewHistoryLogic(db *gorm.DB) *HistoryLogic {
|
||||
logic := &HistoryLogic{db: db}
|
||||
// NewHistoryLogic returns bridge history services.
|
||||
func NewHistoryLogic(db *gorm.DB, redis *redis.Client) *HistoryLogic {
|
||||
logic := &HistoryLogic{
|
||||
crossMessageOrm: orm.NewCrossMessage(db),
|
||||
batchEventOrm: orm.NewBatchEvent(db),
|
||||
redis: redis,
|
||||
cacheMetrics: initCacheMetrics(),
|
||||
}
|
||||
return logic
|
||||
}
|
||||
|
||||
// getCrossTxClaimInfo get UserClaimInfos by address
|
||||
func getCrossTxClaimInfo(ctx context.Context, msgHash string, db *gorm.DB) *types.UserClaimInfo {
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(db)
|
||||
rollupOrm := orm.NewRollupBatch(db)
|
||||
l2sentMsg, err := l2SentMsgOrm.GetL2SentMsgByHash(ctx, msgHash)
|
||||
if err != nil || l2sentMsg == nil {
|
||||
log.Debug("getCrossTxClaimInfo failed", "error", err)
|
||||
return &types.UserClaimInfo{}
|
||||
}
|
||||
batch, err := rollupOrm.GetRollupBatchByIndex(ctx, l2sentMsg.BatchIndex)
|
||||
if err != nil {
|
||||
log.Debug("getCrossTxClaimInfo failed", "error", err)
|
||||
return &types.UserClaimInfo{}
|
||||
}
|
||||
return &types.UserClaimInfo{
|
||||
From: l2sentMsg.Sender,
|
||||
To: l2sentMsg.Target,
|
||||
Value: l2sentMsg.Value,
|
||||
Nonce: strconv.FormatUint(l2sentMsg.Nonce, 10),
|
||||
Message: l2sentMsg.MsgData,
|
||||
Proof: "0x" + l2sentMsg.MsgProof,
|
||||
BatchHash: batch.BatchHash,
|
||||
BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func updateCrossTxHash(ctx context.Context, msgHash string, txInfo *types.TxHistoryInfo, db *gorm.DB) {
|
||||
relayed := orm.NewRelayedMsg(db)
|
||||
relayed, err := relayed.GetRelayedMsgByHash(ctx, msgHash)
|
||||
if err != nil {
|
||||
log.Debug("updateCrossTxHash failed", "error", err)
|
||||
return
|
||||
}
|
||||
if relayed == nil {
|
||||
return
|
||||
}
|
||||
if relayed.Layer1Hash != "" {
|
||||
txInfo.FinalizeTx.Hash = relayed.Layer1Hash
|
||||
txInfo.FinalizeTx.BlockNumber = relayed.Height
|
||||
return
|
||||
}
|
||||
if relayed.Layer2Hash != "" {
|
||||
txInfo.FinalizeTx.Hash = relayed.Layer2Hash
|
||||
txInfo.FinalizeTx.BlockNumber = relayed.Height
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// GetClaimableTxsByAddress gets all claimable txs under the given address
|
||||
func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address common.Address, offset int, limit int) ([]*types.TxHistoryInfo, uint64, error) {
|
||||
var txHistories []*types.TxHistoryInfo
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(h.db)
|
||||
l2CrossMsgOrm := orm.NewCrossMsg(h.db)
|
||||
total, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressTotalNum(ctx, address.Hex())
|
||||
if err != nil || total == 0 {
|
||||
return txHistories, 0, err
|
||||
}
|
||||
results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(ctx, address.Hex(), offset, limit)
|
||||
if err != nil || len(results) == 0 {
|
||||
return txHistories, 0, err
|
||||
}
|
||||
var msgHashList []string
|
||||
for _, result := range results {
|
||||
msgHashList = append(msgHashList, result.MsgHash)
|
||||
}
|
||||
crossMsgs, err := l2CrossMsgOrm.GetL2CrossMsgByMsgHashList(ctx, msgHashList)
|
||||
// crossMsgs can be empty, because they can be emitted by user directly call contract
|
||||
if err != nil {
|
||||
return txHistories, 0, err
|
||||
}
|
||||
crossMsgMap := make(map[string]*orm.CrossMsg)
|
||||
for _, crossMsg := range crossMsgs {
|
||||
crossMsgMap[crossMsg.MsgHash] = crossMsg
|
||||
}
|
||||
for _, result := range results {
|
||||
txInfo := &types.TxHistoryInfo{
|
||||
Hash: result.TxHash,
|
||||
IsL1: false,
|
||||
BlockNumber: result.Height,
|
||||
FinalizeTx: &types.Finalized{},
|
||||
ClaimInfo: getCrossTxClaimInfo(ctx, result.MsgHash, h.db),
|
||||
}
|
||||
if crossMsg, exist := crossMsgMap[result.MsgHash]; exist {
|
||||
txInfo.Amount = crossMsg.Amount
|
||||
txInfo.To = crossMsg.Target
|
||||
txInfo.BlockTimestamp = crossMsg.Timestamp
|
||||
txInfo.CreatedAt = crossMsg.CreatedAt
|
||||
txInfo.L1Token = crossMsg.Layer1Token
|
||||
txInfo.L2Token = crossMsg.Layer2Token
|
||||
}
|
||||
txHistories = append(txHistories, txInfo)
|
||||
}
|
||||
return txHistories, total, err
|
||||
}
|
||||
|
||||
// GetTxsByAddress gets all txs under the given address
|
||||
func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address common.Address, offset int, limit int) ([]*types.TxHistoryInfo, uint64, error) {
|
||||
var txHistories []*types.TxHistoryInfo
|
||||
utilOrm := orm.NewCrossMsg(h.db)
|
||||
total, err := utilOrm.GetTotalCrossMsgCountByAddress(ctx, address.String())
|
||||
if err != nil || total == 0 {
|
||||
return txHistories, 0, err
|
||||
}
|
||||
result, err := utilOrm.GetCrossMsgsByAddressWithOffset(ctx, address.String(), offset, limit)
|
||||
|
||||
// GetL2UnclaimedWithdrawalsByAddress gets all unclaimed withdrawal txs under given address.
|
||||
func (h *HistoryLogic) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
|
||||
cacheKey := cacheKeyPrefixL2ClaimableWithdrawalsByAddr + address
|
||||
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
|
||||
if err != nil {
|
||||
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
for _, msg := range result {
|
||||
txHistory := &types.TxHistoryInfo{
|
||||
Hash: msg.Layer1Hash + msg.Layer2Hash,
|
||||
Amount: msg.Amount,
|
||||
To: msg.Target,
|
||||
L1Token: msg.Layer1Token,
|
||||
L2Token: msg.Layer2Token,
|
||||
IsL1: msg.MsgType == int(orm.Layer1Msg),
|
||||
BlockNumber: msg.Height,
|
||||
BlockTimestamp: msg.Timestamp,
|
||||
CreatedAt: msg.CreatedAt,
|
||||
FinalizeTx: &types.Finalized{
|
||||
Hash: "",
|
||||
},
|
||||
ClaimInfo: getCrossTxClaimInfo(ctx, msg.MsgHash, h.db),
|
||||
}
|
||||
updateCrossTxHash(ctx, msg.MsgHash, txHistory, h.db)
|
||||
txHistories = append(txHistories, txHistory)
|
||||
|
||||
if isHit {
|
||||
h.cacheMetrics.cacheHits.WithLabelValues("GetL2UnclaimedWithdrawalsByAddress").Inc()
|
||||
log.Info("cache hit", "cache key", cacheKey)
|
||||
return pagedTxs, total, nil
|
||||
}
|
||||
return txHistories, total, nil
|
||||
|
||||
h.cacheMetrics.cacheMisses.WithLabelValues("GetL2UnclaimedWithdrawalsByAddress").Inc()
|
||||
log.Info("cache miss", "cache key", cacheKey)
|
||||
|
||||
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
|
||||
var messages []*orm.CrossMessage
|
||||
messages, err = h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return messages, nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 claimable withdrawals by address", "address", address, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
messages, ok := result.([]*orm.CrossMessage)
|
||||
if !ok {
|
||||
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
|
||||
return nil, 0, errors.New("unexpected error")
|
||||
}
|
||||
|
||||
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
|
||||
}
|
||||
|
||||
// GetTxsByHashes gets tx infos under the given tx hashes
|
||||
func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, hashes []string) ([]*types.TxHistoryInfo, error) {
|
||||
txHistories := make([]*types.TxHistoryInfo, 0)
|
||||
CrossMsgOrm := orm.NewCrossMsg(h.db)
|
||||
for _, hash := range hashes {
|
||||
l1result, err := CrossMsgOrm.GetL1CrossMsgByHash(ctx, common.HexToHash(hash))
|
||||
// GetL2WithdrawalsByAddress gets all withdrawal txs under given address.
|
||||
func (h *HistoryLogic) GetL2WithdrawalsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
|
||||
cacheKey := cacheKeyPrefixL2WithdrawalsByAddr + address
|
||||
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
|
||||
if err != nil {
|
||||
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if isHit {
|
||||
h.cacheMetrics.cacheHits.WithLabelValues("GetL2WithdrawalsByAddress").Inc()
|
||||
log.Info("cache hit", "cache key", cacheKey)
|
||||
return pagedTxs, total, nil
|
||||
}
|
||||
|
||||
h.cacheMetrics.cacheMisses.WithLabelValues("GetL2WithdrawalsByAddress").Inc()
|
||||
log.Info("cache miss", "cache key", cacheKey)
|
||||
|
||||
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
|
||||
var messages []*orm.CrossMessage
|
||||
messages, err = h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if l1result != nil {
|
||||
txHistory := &types.TxHistoryInfo{
|
||||
Hash: l1result.Layer1Hash,
|
||||
Amount: l1result.Amount,
|
||||
To: l1result.Target,
|
||||
IsL1: true,
|
||||
L1Token: l1result.Layer1Token,
|
||||
L2Token: l1result.Layer2Token,
|
||||
BlockNumber: l1result.Height,
|
||||
BlockTimestamp: l1result.Timestamp,
|
||||
CreatedAt: l1result.CreatedAt,
|
||||
FinalizeTx: &types.Finalized{
|
||||
Hash: "",
|
||||
},
|
||||
}
|
||||
updateCrossTxHash(ctx, l1result.MsgHash, txHistory, h.db)
|
||||
txHistories = append(txHistories, txHistory)
|
||||
continue
|
||||
}
|
||||
l2result, err := CrossMsgOrm.GetL2CrossMsgByHash(ctx, common.HexToHash(hash))
|
||||
return messages, nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 withdrawals by address", "address", address, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
messages, ok := result.([]*orm.CrossMessage)
|
||||
if !ok {
|
||||
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
|
||||
return nil, 0, errors.New("unexpected error")
|
||||
}
|
||||
|
||||
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
|
||||
}
|
||||
|
||||
// GetTxsByAddress gets tx infos under given address.
|
||||
func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
|
||||
cacheKey := cacheKeyPrefixTxsByAddr + address
|
||||
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
|
||||
if err != nil {
|
||||
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if isHit {
|
||||
h.cacheMetrics.cacheHits.WithLabelValues("GetTxsByAddress").Inc()
|
||||
log.Info("cache hit", "cache key", cacheKey)
|
||||
return pagedTxs, total, nil
|
||||
}
|
||||
|
||||
h.cacheMetrics.cacheMisses.WithLabelValues("GetTxsByAddress").Inc()
|
||||
log.Info("cache miss", "cache key", cacheKey)
|
||||
|
||||
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
|
||||
var messages []*orm.CrossMessage
|
||||
messages, err = h.crossMessageOrm.GetTxsByAddress(ctx, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if l2result != nil {
|
||||
txHistory := &types.TxHistoryInfo{
|
||||
Hash: l2result.Layer2Hash,
|
||||
Amount: l2result.Amount,
|
||||
To: l2result.Target,
|
||||
IsL1: false,
|
||||
L1Token: l2result.Layer1Token,
|
||||
L2Token: l2result.Layer2Token,
|
||||
BlockNumber: l2result.Height,
|
||||
BlockTimestamp: l2result.Timestamp,
|
||||
CreatedAt: l2result.CreatedAt,
|
||||
FinalizeTx: &types.Finalized{
|
||||
Hash: "",
|
||||
},
|
||||
ClaimInfo: getCrossTxClaimInfo(ctx, l2result.MsgHash, h.db),
|
||||
}
|
||||
updateCrossTxHash(ctx, l2result.MsgHash, txHistory, h.db)
|
||||
txHistories = append(txHistories, txHistory)
|
||||
return messages, nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("failed to get txs by address", "address", address, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
messages, ok := result.([]*orm.CrossMessage)
|
||||
if !ok {
|
||||
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
|
||||
return nil, 0, errors.New("unexpected error")
|
||||
}
|
||||
|
||||
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
|
||||
}
|
||||
|
||||
// GetTxsByHashes gets tx infos under given tx hashes.
|
||||
func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([]*types.TxHistoryInfo, error) {
|
||||
hashesMap := make(map[string]struct{}, len(txHashes))
|
||||
results := make([]*types.TxHistoryInfo, 0, len(txHashes))
|
||||
uncachedHashes := make([]string, 0, len(txHashes))
|
||||
|
||||
for _, hash := range txHashes {
|
||||
if _, exists := hashesMap[hash]; exists {
|
||||
// Skip duplicate tx hash values.
|
||||
continue
|
||||
}
|
||||
hashesMap[hash] = struct{}{}
|
||||
|
||||
cacheKey := cacheKeyPrefixQueryTxsByHashes + hash
|
||||
cachedData, err := h.redis.Get(ctx, cacheKey).Bytes()
|
||||
if err != nil && errors.Is(err, redis.Nil) {
|
||||
h.cacheMetrics.cacheMisses.WithLabelValues("PostQueryTxsByHashes").Inc()
|
||||
log.Info("cache miss", "cache key", cacheKey)
|
||||
uncachedHashes = append(uncachedHashes, hash)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Error("failed to get data from Redis", "error", err)
|
||||
uncachedHashes = append(uncachedHashes, hash)
|
||||
continue
|
||||
}
|
||||
|
||||
h.cacheMetrics.cacheHits.WithLabelValues("PostQueryTxsByHashes").Inc()
|
||||
log.Info("cache hit", "cache key", cacheKey)
|
||||
|
||||
if len(cachedData) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
var txInfo types.TxHistoryInfo
|
||||
if unmarshalErr := json.Unmarshal(cachedData, &txInfo); unmarshalErr != nil {
|
||||
log.Error("failed to unmarshal cached data", "error", unmarshalErr)
|
||||
uncachedHashes = append(uncachedHashes, hash)
|
||||
continue
|
||||
}
|
||||
results = append(results, &txInfo)
|
||||
}
|
||||
|
||||
if len(uncachedHashes) > 0 {
|
||||
messages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
|
||||
if err != nil {
|
||||
log.Error("failed to get messages by tx hashes", "hashes", uncachedHashes)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var txHistories []*types.TxHistoryInfo
|
||||
for _, message := range messages {
|
||||
txHistories = append(txHistories, getTxHistoryInfo(message))
|
||||
}
|
||||
|
||||
resultMap := make(map[string]*types.TxHistoryInfo)
|
||||
for _, result := range txHistories {
|
||||
results = append(results, result)
|
||||
resultMap[result.Hash] = result
|
||||
}
|
||||
|
||||
for _, hash := range uncachedHashes {
|
||||
cacheKey := cacheKeyPrefixQueryTxsByHashes + hash
|
||||
result, found := resultMap[hash]
|
||||
if !found {
|
||||
// tx hash not found, which is also a valid result, cache empty string.
|
||||
if cacheErr := h.redis.Set(ctx, cacheKey, "", cacheKeyExpiredTime).Err(); cacheErr != nil {
|
||||
log.Error("failed to set data to Redis", "error", cacheErr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
jsonData, unmarshalErr := json.Marshal(result)
|
||||
if unmarshalErr != nil {
|
||||
log.Error("failed to marshal data", "error", unmarshalErr)
|
||||
continue
|
||||
}
|
||||
|
||||
if cacheErr := h.redis.Set(ctx, cacheKey, jsonData, cacheKeyExpiredTime).Err(); cacheErr != nil {
|
||||
log.Error("failed to set data to Redis", "error", cacheErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
return txHistories, nil
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
|
||||
txHistory := &types.TxHistoryInfo{
|
||||
MessageHash: message.MessageHash,
|
||||
TokenType: orm.TokenType(message.TokenType),
|
||||
TokenIDs: utils.ConvertStringToStringArray(message.TokenIDs),
|
||||
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmounts),
|
||||
L1TokenAddress: message.L1TokenAddress,
|
||||
L2TokenAddress: message.L2TokenAddress,
|
||||
MessageType: orm.MessageType(message.MessageType),
|
||||
TxStatus: orm.TxStatusType(message.TxStatus),
|
||||
BlockTimestamp: message.BlockTimestamp,
|
||||
}
|
||||
if txHistory.MessageType == orm.MessageTypeL1SentMessage {
|
||||
txHistory.Hash = message.L1TxHash
|
||||
txHistory.ReplayTxHash = message.L1ReplayTxHash
|
||||
txHistory.RefundTxHash = message.L1RefundTxHash
|
||||
txHistory.BlockNumber = message.L1BlockNumber
|
||||
txHistory.CounterpartChainTx = &types.CounterpartChainTx{
|
||||
Hash: message.L2TxHash,
|
||||
BlockNumber: message.L2BlockNumber,
|
||||
}
|
||||
} else {
|
||||
txHistory.Hash = message.L2TxHash
|
||||
txHistory.BlockNumber = message.L2BlockNumber
|
||||
txHistory.CounterpartChainTx = &types.CounterpartChainTx{
|
||||
Hash: message.L1TxHash,
|
||||
BlockNumber: message.L1BlockNumber,
|
||||
}
|
||||
if orm.RollupStatusType(message.RollupStatus) == orm.RollupStatusTypeFinalized {
|
||||
txHistory.ClaimInfo = &types.ClaimInfo{
|
||||
From: message.MessageFrom,
|
||||
To: message.MessageTo,
|
||||
Value: message.MessageValue,
|
||||
Nonce: strconv.FormatUint(message.MessageNonce, 10),
|
||||
Message: message.MessageData,
|
||||
Proof: types.L2MessageProof{
|
||||
BatchIndex: strconv.FormatUint(message.BatchIndex, 10),
|
||||
MerkleProof: "0x" + common.Bytes2Hex(message.MerkleProof),
|
||||
},
|
||||
Claimable: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
return txHistory
|
||||
}
|
||||
|
||||
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
|
||||
start := int64((pageNum - 1) * pageSize)
|
||||
end := start + int64(pageSize) - 1
|
||||
|
||||
total, err := h.redis.ZCard(ctx, cacheKey).Result()
|
||||
if err != nil {
|
||||
log.Error("failed to get zcard result", "error", err)
|
||||
return nil, 0, false, err
|
||||
}
|
||||
|
||||
if total == 0 {
|
||||
return nil, 0, false, nil
|
||||
}
|
||||
|
||||
values, err := h.redis.ZRange(ctx, cacheKey, start, end).Result()
|
||||
if err != nil {
|
||||
log.Error("failed to get zrange result", "error", err)
|
||||
return nil, 0, false, err
|
||||
}
|
||||
|
||||
if len(values) == 0 {
|
||||
return nil, 0, false, nil
|
||||
}
|
||||
|
||||
// check if it's empty placeholder.
|
||||
if len(values) == 1 && values[0] == "empty_page" {
|
||||
return nil, 0, true, nil
|
||||
}
|
||||
|
||||
var pagedTxs []*types.TxHistoryInfo
|
||||
for _, v := range values {
|
||||
var tx types.TxHistoryInfo
|
||||
if unmarshalErr := json.Unmarshal([]byte(v), &tx); unmarshalErr != nil {
|
||||
log.Error("failed to unmarshal transaction data", "error", unmarshalErr)
|
||||
return nil, 0, false, unmarshalErr
|
||||
}
|
||||
pagedTxs = append(pagedTxs, &tx)
|
||||
}
|
||||
return pagedTxs, uint64(total), true, nil
|
||||
}
|
||||
|
||||
func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []*types.TxHistoryInfo) error {
|
||||
_, err := h.redis.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
|
||||
if len(txs) == 0 {
|
||||
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: 0, Member: "empty_page"}).Err(); err != nil {
|
||||
log.Error("failed to add empty page indicator to sorted set", "error", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// The transactions are sorted, thus we set the score as their indices.
|
||||
for i, tx := range txs {
|
||||
txBytes, err := json.Marshal(tx)
|
||||
if err != nil {
|
||||
log.Error("failed to marshal transaction to json", "error", err)
|
||||
return err
|
||||
}
|
||||
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(i), Member: txBytes}).Err(); err != nil {
|
||||
log.Error("failed to add transaction to sorted set", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := pipe.Expire(ctx, cacheKey, cacheKeyExpiredTime).Err(); err != nil {
|
||||
log.Error("failed to set expiry time", "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("failed to execute transaction", "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, messages []*orm.CrossMessage, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
|
||||
var txHistories []*types.TxHistoryInfo
|
||||
for _, message := range messages {
|
||||
txHistories = append(txHistories, getTxHistoryInfo(message))
|
||||
}
|
||||
|
||||
err := h.cacheTxsInfo(ctx, cacheKey, txHistories)
|
||||
if err != nil {
|
||||
log.Error("failed to cache txs info", "key", cacheKey, "err", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
|
||||
if err != nil {
|
||||
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if !isHit {
|
||||
log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
return pagedTxs, total, nil
|
||||
}
|
||||
|
||||
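The new HistoryLogic above is a cache-aside service: each read first tries a Redis sorted set keyed per address (getCachedTxsInfo maps page N to the ZRANGE indices (N-1)*pageSize through (N-1)*pageSize+pageSize-1), falls back to Postgres via singleflight on a miss, and repopulates the sorted set with a one-minute TTL. A minimal usage sketch follows; the gorm Postgres driver, DSN, Redis address, and wallet address below are placeholders assumed for illustration, not values from this change.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"

	"scroll-tech/bridge-history-api/internal/logic"
)

func main() {
	// Hypothetical connection settings; replace with the real deployment values.
	db, err := gorm.Open(postgres.Open("postgres://user:pass@localhost:5432/bridge_history"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	h := logic.NewHistoryLogic(db, rdb)

	// The first call misses the cache and reads from the database; a repeated
	// call within cacheKeyExpiredTime (1 minute) is served from the Redis
	// sorted set and only increments the cache-hit counter.
	txs, total, err := h.GetTxsByAddress(context.Background(), "0x0000000000000000000000000000000000000001", 1, 10)
	if err != nil {
		panic(err)
	}
	fmt.Println("total:", total, "returned on page 1:", len(txs))
}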
40
bridge-history-api/internal/logic/history_logic_metrics.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type cacheMetrics struct {
|
||||
cacheHits *prometheus.CounterVec
|
||||
cacheMisses *prometheus.CounterVec
|
||||
}
|
||||
|
||||
var (
|
||||
initMetricsOnce sync.Once
|
||||
cm *cacheMetrics
|
||||
)
|
||||
|
||||
func initCacheMetrics() *cacheMetrics {
|
||||
initMetricsOnce.Do(func() {
|
||||
cm = &cacheMetrics{
|
||||
cacheHits: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "bridge_history_api_cache_hits_total",
|
||||
Help: "The total number of cache hits",
|
||||
},
|
||||
[]string{"api"},
|
||||
),
|
||||
cacheMisses: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "bridge_history_api_cache_misses_total",
|
||||
Help: "The total number of cache misses",
|
||||
},
|
||||
[]string{"api"},
|
||||
),
|
||||
}
|
||||
})
|
||||
return cm
|
||||
}
|
||||
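initCacheMetrics registers both counters with the default Prometheus registerer through promauto, which panics on duplicate registration; the sync.Once guard therefore makes it safe to construct more than one HistoryLogic in the same process. A package-internal test sketch (hypothetical, not part of this change) that pins down the singleton behaviour:

package logic

import "testing"

// TestInitCacheMetricsIdempotent is a hypothetical test: repeated calls must
// return the same instance, otherwise a second promauto registration of the
// same counter names would panic with a duplicate-collector error.
func TestInitCacheMetricsIdempotent(t *testing.T) {
	first := initCacheMetrics()
	second := initCacheMetrics()
	if first != second {
		t.Fatalf("expected initCacheMetrics to return the same singleton instance")
	}
}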
272
bridge-history-api/internal/logic/l1_event_parser.go
Normal file
@@ -0,0 +1,272 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L1EventParser is the L1 event parser
|
||||
type L1EventParser struct {
|
||||
}
|
||||
|
||||
// NewL1EventParser creates an L1 event parser
|
||||
func NewL1EventParser() *L1EventParser {
|
||||
return &L1EventParser{}
|
||||
}
|
||||
|
||||
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
|
||||
func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l1DepositMessages []*orm.CrossMessage
|
||||
var l1RelayedMessages []*orm.CrossMessage
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
case backendabi.L1DepositETHSig:
|
||||
event := backendabi.ETHMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ETHGatewayABI, &event, "DepositETH", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositETH event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeETH)
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
case backendabi.L1DepositERC20Sig:
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL1ERC20GatewayABI, &event, "DepositERC20", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack DepositERC20 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC20)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
case backendabi.L1DepositERC721Sig:
|
||||
event := backendabi.ERC721MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "DepositERC721", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC721)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = event.TokenID.String()
|
||||
case backendabi.L1BatchDepositERC721Sig:
|
||||
event := backendabi.BatchERC721MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "BatchDepositERC721", vlog); err != nil {
|
||||
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC721)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
|
||||
case backendabi.L1DepositERC1155Sig:
|
||||
event := backendabi.ERC1155MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "DepositERC1155", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC1155)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = event.TokenID.String()
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
case backendabi.L1BatchDepositERC1155Sig:
|
||||
event := backendabi.BatchERC1155MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog); err != nil {
|
||||
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC1155)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
|
||||
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
|
||||
case backendabi.L1SentMessageEventSig:
|
||||
event := backendabi.L1SentMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack SentMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1DepositMessages = append(l1DepositMessages, &orm.CrossMessage{
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
Sender: event.Sender.String(),
|
||||
Receiver: event.Target.String(),
|
||||
TokenType: int(orm.TokenTypeETH),
|
||||
L1TxHash: vlog.TxHash.String(),
|
||||
TokenAmounts: event.Value.String(),
|
||||
MessageNonce: event.MessageNonce.Uint64(),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
TxStatus: int(orm.TxStatusTypeSent),
|
||||
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
|
||||
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
|
||||
})
|
||||
case backendabi.L1RelayedMessageEventSig:
|
||||
event := backendabi.L1RelayedMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "RelayedMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack RelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
|
||||
MessageHash: event.MessageHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
L1TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(orm.TxStatusTypeRelayed),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
})
|
||||
case backendabi.L1FailedRelayedMessageEventSig:
|
||||
event := backendabi.L1FailedRelayedMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "FailedRelayedMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
|
||||
MessageHash: event.MessageHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
L1TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(orm.TxStatusTypeFailedRelayed),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
})
|
||||
}
|
||||
}
|
||||
return l1DepositMessages, l1RelayedMessages, nil
|
||||
}
|
||||
|
||||
// ParseL1BatchEventLogs parses L1 watched batch events.
|
||||
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
|
||||
var l1BatchEvents []*orm.BatchEvent
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
case backendabi.L1CommitBatchEventSig:
|
||||
event := backendabi.L1CommitBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "CommitBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack CommitBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
|
||||
if err != nil || isPending {
|
||||
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
|
||||
if err != nil {
|
||||
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
BatchStatus: int(orm.BatchStatusTypeCommitted),
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
BatchHash: event.BatchHash.String(),
|
||||
StartBlockNumber: startBlock,
|
||||
EndBlockNumber: endBlock,
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
})
|
||||
case backendabi.L1RevertBatchEventSig:
|
||||
event := backendabi.L1RevertBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack RevertBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
BatchStatus: int(orm.BatchStatusTypeReverted),
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
BatchHash: event.BatchHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
})
|
||||
case backendabi.L1FinalizeBatchEventSig:
|
||||
event := backendabi.L1FinalizeBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack FinalizeBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
BatchStatus: int(orm.BatchStatusTypeFinalized),
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
BatchHash: event.BatchHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
})
|
||||
}
|
||||
}
|
||||
return l1BatchEvents, nil
|
||||
}
|
||||
|
||||
// ParseL1MessageQueueEventLogs parses L1 watched message queue events.
|
||||
func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1DepositMessages []*orm.CrossMessage) ([]*orm.MessageQueueEvent, error) {
|
||||
messageHashes := make(map[common.Hash]struct{})
|
||||
for _, msg := range l1DepositMessages {
|
||||
messageHashes[common.HexToHash(msg.MessageHash)] = struct{}{}
|
||||
}
|
||||
|
||||
var l1MessageQueueEvents []*orm.MessageQueueEvent
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
case backendabi.L1QueueTransactionEventSig:
|
||||
event := backendabi.L1QueueTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "QueueTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack QueueTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
messageHash := common.BytesToHash(crypto.Keccak256(event.Data))
|
||||
// If the message hash is not found in the map, the queue transaction did not come from a regular deposit's sendMessage (those are omitted); it is a replayMessage or an enforced tx, so add it to the events.
|
||||
if _, exists := messageHashes[messageHash]; !exists {
|
||||
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
|
||||
EventType: orm.MessageQueueEventTypeQueueTransaction,
|
||||
QueueIndex: event.QueueIndex,
|
||||
MessageHash: messageHash,
|
||||
TxHash: vlog.TxHash,
|
||||
})
|
||||
}
|
||||
case backendabi.L1DequeueTransactionEventSig:
|
||||
event := backendabi.L1DequeueTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DequeueTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DequeueTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
|
||||
for _, index := range skippedIndices {
|
||||
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
|
||||
EventType: orm.MessageQueueEventTypeDequeueTransaction,
|
||||
QueueIndex: index,
|
||||
})
|
||||
}
|
||||
case backendabi.L1DropTransactionEventSig:
|
||||
event := backendabi.L1DropTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DropTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
|
||||
EventType: orm.MessageQueueEventTypeDropTransaction,
|
||||
QueueIndex: event.Index.Uint64(),
|
||||
TxHash: vlog.TxHash,
|
||||
})
|
||||
}
|
||||
}
|
||||
return l1MessageQueueEvents, nil
|
||||
}
|
||||
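The parser above leans entirely on utils.UnpackLog, which is not shown in this diff. A minimal sketch of such a helper, following the bind.UnpackLog pattern from go-ethereum (the scroll-tech fork exposes the same accounts/abi API); the package name and error wording here are assumptions:

package sketch

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/core/types"
)

// unpackLog decodes a log into out: non-indexed fields come from log.Data,
// indexed fields from the topics after the event signature.
func unpackLog(contractABI abi.ABI, out interface{}, event string, log types.Log) error {
	if len(log.Topics) == 0 || log.Topics[0] != contractABI.Events[event].ID {
		return fmt.Errorf("event signature mismatch for %s", event)
	}
	if len(log.Data) > 0 {
		if err := contractABI.UnpackIntoInterface(out, event, log.Data); err != nil {
			return err
		}
	}
	var indexed abi.Arguments
	for _, arg := range contractABI.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopics(out, indexed, log.Topics[1:])
}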
316
bridge-history-api/internal/logic/l1_fetcher.go
Normal file
@@ -0,0 +1,316 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L1ReorgSafeDepth represents the number of block confirmations considered safe against L1 chain reorganizations.
|
||||
// Reorganizations at this depth are extremely unlikely under normal circumstances.
|
||||
const L1ReorgSafeDepth = 64
|
||||
|
||||
// L1FilterResult is the L1 fetcher filter result
|
||||
type L1FilterResult struct {
|
||||
DepositMessages []*orm.CrossMessage
|
||||
RelayedMessages []*orm.CrossMessage
|
||||
BatchEvents []*orm.BatchEvent
|
||||
MessageQueueEvents []*orm.MessageQueueEvent
|
||||
RevertedTxs []*orm.CrossMessage
|
||||
}
|
||||
|
||||
// L1FetcherLogic is the L1 fetcher logic
|
||||
type L1FetcherLogic struct {
|
||||
cfg *config.LayerConfig
|
||||
client *ethclient.Client
|
||||
addressList []common.Address
|
||||
parser *L1EventParser
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
batchEventOrm *orm.BatchEvent
|
||||
|
||||
l1FetcherLogicFetchedTotal *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// NewL1FetcherLogic creates the L1 fetcher logic
|
||||
func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
|
||||
addressList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.StandardERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.CustomERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.WETHGatewayAddr),
|
||||
common.HexToAddress(cfg.DAIGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.ERC721GatewayAddr),
|
||||
common.HexToAddress(cfg.ERC1155GatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.MessengerAddr),
|
||||
|
||||
common.HexToAddress(cfg.ScrollChainAddr),
|
||||
|
||||
common.HexToAddress(cfg.MessageQueueAddr),
|
||||
}
|
||||
|
||||
// Optional erc20 gateways.
|
||||
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
|
||||
}
|
||||
|
||||
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList)
|
||||
|
||||
f := &L1FetcherLogic{
|
||||
db: db,
|
||||
crossMessageOrm: orm.NewCrossMessage(db),
|
||||
batchEventOrm: orm.NewBatchEvent(db),
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
addressList: addressList,
|
||||
parser: NewL1EventParser(),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
f.l1FetcherLogicFetchedTotal = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "L1_fetcher_logic_fetched_total",
|
||||
Help: "The total number of events or failed txs fetched in L1 fetcher logic.",
|
||||
}, []string{"type"})
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
|
||||
blocks, err := utils.GetL1BlocksInRange(ctx, f.client, from, to)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 blocks in range", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
for _, block := range blocks {
|
||||
if block.ParentHash() != lastBlockHash {
|
||||
log.Warn("L1 reorg detected", "reorg height", block.NumberU64()-1, "expected hash", block.ParentHash().String(), "local hash", lastBlockHash.String())
|
||||
var resyncHeight uint64
|
||||
if block.NumberU64() > L1ReorgSafeDepth+1 {
|
||||
resyncHeight = block.NumberU64() - L1ReorgSafeDepth - 1
|
||||
}
|
||||
header, err := f.client.HeaderByNumber(ctx, new(big.Int).SetUint64(resyncHeight))
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 header by number", "block number", resyncHeight, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
return true, resyncHeight, header.Hash(), nil, nil
|
||||
}
|
||||
lastBlockHash = block.Hash()
|
||||
}
|
||||
|
||||
return false, 0, lastBlockHash, blocks, nil
|
||||
}
|
||||
|
||||
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
|
||||
var l1RevertedTxs []*orm.CrossMessage
|
||||
blockTimestampsMap := make(map[uint64]uint64)
|
||||
|
||||
for i := from; i <= to; i++ {
|
||||
block := blocks[i-from]
|
||||
blockTimestampsMap[block.NumberU64()] = block.Time()
|
||||
|
||||
for _, tx := range block.Transactions() {
|
||||
txTo := tx.To()
|
||||
if txTo == nil {
|
||||
continue
|
||||
}
|
||||
toAddress := txTo.String()
|
||||
|
||||
// GatewayRouter: L1 deposit.
|
||||
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
|
||||
if toAddress != f.cfg.GatewayRouterAddr && toAddress != f.cfg.MessengerAddr {
|
||||
continue
|
||||
}
|
||||
|
||||
var receipt *types.Receipt
|
||||
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
|
||||
if receiptErr != nil {
|
||||
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
|
||||
return nil, nil, receiptErr
|
||||
}
|
||||
|
||||
// Check if the transaction is failed
|
||||
if receipt.Status != types.ReceiptStatusFailed {
|
||||
continue
|
||||
}
|
||||
|
||||
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
|
||||
sender, senderErr := signer.Sender(tx)
|
||||
if senderErr != nil {
|
||||
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
|
||||
return nil, nil, senderErr
|
||||
}
|
||||
|
||||
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
|
||||
L1TxHash: tx.Hash().String(),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
Sender: sender.String(),
|
||||
Receiver: (*tx.To()).String(),
|
||||
L1BlockNumber: receipt.BlockNumber.Uint64(),
|
||||
BlockTimestamp: block.Time(),
|
||||
TxStatus: int(orm.TxStatusTypeSentTxReverted),
|
||||
})
|
||||
}
|
||||
}
|
||||
return blockTimestampsMap, l1RevertedTxs, nil
|
||||
}
|
||||
|
||||
func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
|
||||
query := ethereum.FilterQuery{
|
||||
FromBlock: new(big.Int).SetUint64(from), // inclusive
|
||||
ToBlock: new(big.Int).SetUint64(to), // inclusive
|
||||
Addresses: f.addressList,
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
|
||||
query.Topics[0] = make([]common.Hash, 13)
|
||||
query.Topics[0][0] = backendabi.L1DepositETHSig
|
||||
query.Topics[0][1] = backendabi.L1DepositERC20Sig
|
||||
query.Topics[0][2] = backendabi.L1DepositERC721Sig
|
||||
query.Topics[0][3] = backendabi.L1DepositERC1155Sig
|
||||
query.Topics[0][4] = backendabi.L1SentMessageEventSig
|
||||
query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
|
||||
query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
|
||||
query.Topics[0][7] = backendabi.L1CommitBatchEventSig
|
||||
query.Topics[0][8] = backendabi.L1RevertBatchEventSig
|
||||
query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
|
||||
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
|
||||
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
|
||||
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
|
||||
|
||||
eventLogs, err := f.client.FilterLogs(ctx, query)
|
||||
if err != nil {
|
||||
log.Error("failed to filter L1 event logs", "from", from, "to", to, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
return eventLogs, nil
|
||||
}
|
||||
|
||||
// L1Fetcher fetches L1 blocks in the given range, detects reorgs, and parses the bridge-related events
|
||||
func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, *L1FilterResult, error) {
|
||||
log.Info("fetch and save L1 events", "from", from, "to", to)
|
||||
|
||||
isReorg, reorgHeight, blockHash, blocks, getErr := f.getBlocksAndDetectReorg(ctx, from, to, lastBlockHash)
|
||||
if getErr != nil {
|
||||
log.Error("L1Fetcher getBlocksAndDetectReorg failed", "from", from, "to", to, "error", getErr)
|
||||
return false, 0, common.Hash{}, nil, getErr
|
||||
}
|
||||
|
||||
if isReorg {
|
||||
return isReorg, reorgHeight, blockHash, nil, nil
|
||||
}
|
||||
|
||||
blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
|
||||
if err != nil {
|
||||
log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
eventLogs, err := f.l1FetcherLogs(ctx, from, to)
|
||||
if err != nil {
|
||||
log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(eventLogs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l1MessageQueueEvents, err := f.parser.ParseL1MessageQueueEventLogs(eventLogs, l1DepositMessages)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L1 message queue event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
res := L1FilterResult{
|
||||
DepositMessages: l1DepositMessages,
|
||||
RelayedMessages: l1RelayedMessages,
|
||||
BatchEvents: l1BatchEvents,
|
||||
MessageQueueEvents: l1MessageQueueEvents,
|
||||
RevertedTxs: l1RevertedTxs,
|
||||
}
|
||||
|
||||
f.updateMetrics(res)
|
||||
|
||||
return false, 0, blockHash, &res, nil
|
||||
}
|
||||
|
||||
func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_gateway_router_transaction").Add(float64(len(res.RevertedTxs)))
|
||||
|
||||
for _, depositMessage := range res.DepositMessages {
|
||||
switch orm.TokenType(depositMessage.TokenType) {
|
||||
case orm.TokenTypeETH:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_eth").Add(1)
|
||||
case orm.TokenTypeERC20:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc20").Add(1)
|
||||
case orm.TokenTypeERC721:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc721").Add(1)
|
||||
case orm.TokenTypeERC1155:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc1155").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
for _, relayedMessage := range res.RelayedMessages {
|
||||
switch orm.TxStatusType(relayedMessage.TxStatus) {
|
||||
case orm.TxStatusTypeRelayed:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_relayed_message").Add(1)
|
||||
case orm.TxStatusTypeFailedRelayed:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_relayed_message").Add(1)
|
||||
}
|
||||
// Have not tracked L1 relayed message reverted transaction yet.
|
||||
// 1. need to parse calldata of tx.
|
||||
// 2. hard to track internal tx.
|
||||
}
|
||||
|
||||
for _, batchEvent := range res.BatchEvents {
|
||||
switch orm.BatchStatusType(batchEvent.BatchStatus) {
|
||||
case orm.BatchStatusTypeCommitted:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_commit_batch_event").Add(1)
|
||||
case orm.BatchStatusTypeReverted:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_revert_batch_event").Add(1)
|
||||
case orm.BatchStatusTypeFinalized:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_finalize_batch_event").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
for _, messageQueueEvent := range res.MessageQueueEvents {
|
||||
switch messageQueueEvent.EventType {
|
||||
case orm.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_replay_message_or_enforced_transaction").Add(1)
|
||||
case orm.MessageQueueEventTypeDequeueTransaction:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
|
||||
case orm.MessageQueueEventTypeDropTransaction:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
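In l1FetcherLogs above, all thirteen signatures are placed in Topics[0], which FilterLogs treats as an OR within that topic position (separate positions would be AND-ed). A compact, equivalent way to build that kind of query, shown as a standalone sketch with an assumed package and function name:

package sketch

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
)

// newEventFilter matches any of the given event signatures at topic position 0,
// emitted by any of the given contract addresses, over an inclusive block range.
func newEventFilter(from, to uint64, addresses []common.Address, sigs []common.Hash) ethereum.FilterQuery {
	return ethereum.FilterQuery{
		FromBlock: new(big.Int).SetUint64(from), // inclusive
		ToBlock:   new(big.Int).SetUint64(to),   // inclusive
		Addresses: addresses,
		Topics:    [][]common.Hash{sigs}, // hashes inside one inner slice are OR-ed
	}
}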
167
bridge-history-api/internal/logic/l2_event_parser.go
Normal file
@@ -0,0 +1,167 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L2EventParser is the L2 event parser
|
||||
type L2EventParser struct {
|
||||
}
|
||||
|
||||
// NewL2EventParser creates the L2 event parser
|
||||
func NewL2EventParser() *L2EventParser {
|
||||
return &L2EventParser{}
|
||||
}
|
||||
|
||||
// ParseL2EventLogs parses L2 watched events
|
||||
func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l2WithdrawMessages []*orm.CrossMessage
|
||||
var l2RelayedMessages []*orm.CrossMessage
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
case backendabi.L2WithdrawETHSig:
|
||||
event := backendabi.ETHMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ETHGatewayABI, &event, "WithdrawETH", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawETH event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeETH)
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
case backendabi.L2WithdrawERC20Sig:
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC20GatewayABI, &event, "WithdrawERC20", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC20)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
case backendabi.L2WithdrawERC721Sig:
|
||||
event := backendabi.ERC721MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC721)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = event.TokenID.String()
|
||||
case backendabi.L2BatchWithdrawERC721Sig:
|
||||
event := backendabi.BatchERC721MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC721)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
|
||||
case backendabi.L2WithdrawERC1155Sig:
|
||||
event := backendabi.ERC1155MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC1155)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = event.TokenID.String()
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
case backendabi.L2BatchWithdrawERC1155Sig:
|
||||
event := backendabi.BatchERC1155MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC1155)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
|
||||
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
|
||||
case backendabi.L2SentMessageEventSig:
|
||||
event := backendabi.L2SentMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "SentMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack SentMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2WithdrawMessages = append(l2WithdrawMessages, &orm.CrossMessage{
|
||||
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
|
||||
Sender: event.Sender.String(),
|
||||
Receiver: event.Target.String(),
|
||||
TokenType: int(orm.TokenTypeETH),
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
TokenAmounts: event.Value.String(),
|
||||
MessageFrom: event.Sender.String(),
|
||||
MessageTo: event.Target.String(),
|
||||
MessageValue: event.Value.String(),
|
||||
MessageNonce: event.MessageNonce.Uint64(),
|
||||
MessageData: hexutil.Encode(event.Message),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
TxStatus: int(orm.TxStatusTypeSent),
|
||||
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
|
||||
L2BlockNumber: vlog.BlockNumber,
|
||||
})
|
||||
case backendabi.L2RelayedMessageEventSig:
|
||||
event := backendabi.L2RelayedMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "RelayedMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack RelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
|
||||
MessageHash: event.MessageHash.String(),
|
||||
L2BlockNumber: vlog.BlockNumber,
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(orm.TxStatusTypeRelayed),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
})
|
||||
case backendabi.L2FailedRelayedMessageEventSig:
|
||||
event := backendabi.L2RelayedMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "FailedRelayedMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
|
||||
MessageHash: event.MessageHash.String(),
|
||||
L2BlockNumber: vlog.BlockNumber,
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(orm.TxStatusTypeFailedRelayed),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
})
|
||||
}
|
||||
}
|
||||
return l2WithdrawMessages, l2RelayedMessages, nil
|
||||
}
|
||||
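ParseL2EventLogs only transforms logs; the caller supplies the filtered logs plus a block-number-to-timestamp map (the same shape the L1 fetcher builds in getRevertedTxs). A hypothetical single-block driver, with assumed package and function names, might look like this:

package sketch

import (
	"context"
	"math/big"

	"github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"

	"scroll-tech/bridge-history-api/internal/logic"
	"scroll-tech/bridge-history-api/internal/orm"
)

// parseBlock fetches the messenger logs of one L2 block, builds the
// block-number -> timestamp map the parser expects, and hands both to
// ParseL2EventLogs, returning withdraw and relayed messages.
func parseBlock(ctx context.Context, client *ethclient.Client, messenger common.Address, number uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
	block, err := client.BlockByNumber(ctx, new(big.Int).SetUint64(number))
	if err != nil {
		return nil, nil, err
	}
	blockTimestampsMap := map[uint64]uint64{block.NumberU64(): block.Time()}

	logs, err := client.FilterLogs(ctx, ethereum.FilterQuery{
		FromBlock: new(big.Int).SetUint64(number),
		ToBlock:   new(big.Int).SetUint64(number),
		Addresses: []common.Address{messenger},
	})
	if err != nil {
		return nil, nil, err
	}
	return logic.NewL2EventParser().ParseL2EventLogs(logs, blockTimestampsMap)
}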
281
bridge-history-api/internal/logic/l2_fetcher.go
Normal file
@@ -0,0 +1,281 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L2ReorgSafeDepth represents the number of block confirmations considered safe against L2 chain reorganizations.
|
||||
// Reorganizations at this depth are extremely unlikely under normal circumstances.
|
||||
const L2ReorgSafeDepth = 256
|
||||
|
||||
// L2FilterResult is the L2 fetcher filter result
|
||||
type L2FilterResult struct {
|
||||
WithdrawMessages []*orm.CrossMessage
|
||||
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
|
||||
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
|
||||
}
|
||||
|
||||
// L2FetcherLogic the L2 fetcher logic
|
||||
type L2FetcherLogic struct {
|
||||
cfg *config.LayerConfig
|
||||
client *ethclient.Client
|
||||
addressList []common.Address
|
||||
parser *L2EventParser
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
batchEventOrm *orm.BatchEvent
|
||||
|
||||
l2FetcherLogicFetchedTotal *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// NewL2FetcherLogic create L2 fetcher logic
|
||||
func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
|
||||
addressList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.StandardERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.CustomERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.WETHGatewayAddr),
|
||||
common.HexToAddress(cfg.DAIGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.ERC721GatewayAddr),
|
||||
common.HexToAddress(cfg.ERC1155GatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.MessengerAddr),
|
||||
}
|
||||
|
||||
// Optional erc20 gateways.
|
||||
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
|
||||
}
|
||||
|
||||
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList)
|
||||
|
||||
f := &L2FetcherLogic{
|
||||
db: db,
|
||||
crossMessageOrm: orm.NewCrossMessage(db),
|
||||
batchEventOrm: orm.NewBatchEvent(db),
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
addressList: addressList,
|
||||
parser: NewL2EventParser(),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
f.l2FetcherLogicFetchedTotal = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "L2_fetcher_logic_fetched_total",
|
||||
Help: "The total number of events or failed txs fetched in L2 fetcher logic.",
|
||||
}, []string{"type"})
|
||||
|
||||
return f
|
||||
}
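NewL2FetcherLogic only appends the USDC and LIDO gateways when the configured address is non-zero, so chains without those deployments simply leave the fields empty. A hedged illustration of a LayerConfig literal with the optional gateways disabled (the field names come from the code above; every address is a placeholder):

cfg := &config.LayerConfig{
    MessengerAddr:     "0x1111111111111111111111111111111111111111", // placeholder
    GatewayRouterAddr: "0x2222222222222222222222222222222222222222", // placeholder
    ETHGatewayAddr:    "0x3333333333333333333333333333333333333333", // placeholder
    // ... remaining required gateway fields set the same way ...
    // USDCGatewayAddr and LIDOGatewayAddr left unset: HexToAddress("") yields the
    // zero address, so those gateways are never added to addressList.
}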

func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.BlockWithRowConsumption, error) {
    blocks, err := utils.GetL2BlocksInRange(ctx, f.client, from, to)
    if err != nil {
        log.Error("failed to get L2 blocks in range", "from", from, "to", to, "err", err)
        return false, 0, common.Hash{}, nil, err
    }

    for _, block := range blocks {
        if block.ParentHash() != lastBlockHash {
            log.Warn("L2 reorg detected", "reorg height", block.NumberU64()-1, "expected hash", block.ParentHash().String(), "local hash", lastBlockHash.String())
            var resyncHeight uint64
            if block.NumberU64() > L2ReorgSafeDepth+1 {
                resyncHeight = block.NumberU64() - L2ReorgSafeDepth - 1
            }
            header, err := f.client.HeaderByNumber(ctx, new(big.Int).SetUint64(resyncHeight))
            if err != nil {
                log.Error("failed to get L2 header by number", "block number", resyncHeight, "err", err)
                return false, 0, common.Hash{}, nil, err
            }
            return true, resyncHeight, header.Hash(), nil, nil
        }
        lastBlockHash = block.Hash()
    }

    return false, 0, lastBlockHash, blocks, nil
}
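The rewind rule above is easiest to see with concrete numbers; the small helper below merely restates the arithmetic from getBlocksAndDetectReorg (same package, no new imports):

// Example: a parent-hash mismatch at block 10000 with L2ReorgSafeDepth = 256
// resyncs from 10000 - 256 - 1 = 9743; for blocks at or below depth 257 the
// guard leaves resyncHeight at 0, i.e. restart from genesis.
func resyncHeightFor(blockNumber uint64) uint64 {
    if blockNumber > L2ReorgSafeDepth+1 {
        return blockNumber - L2ReorgSafeDepth - 1
    }
    return 0
}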

func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.BlockWithRowConsumption) (map[uint64]uint64, []*orm.CrossMessage, []*orm.CrossMessage, error) {
    var l2RevertedUserTxs []*orm.CrossMessage
    var l2RevertedRelayedMessageTxs []*orm.CrossMessage
    blockTimestampsMap := make(map[uint64]uint64)

    for i := from; i <= to; i++ {
        block := blocks[i-from]
        blockTimestampsMap[block.NumberU64()] = block.Time()

        for _, tx := range block.Transactions() {
            txTo := tx.To()
            if txTo == nil {
                continue
            }
            toAddress := txTo.String()

            // GatewayRouter: L2 withdrawal.
            if toAddress == f.cfg.GatewayRouterAddr {
                receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
                if receiptErr != nil {
                    log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
                    return nil, nil, nil, receiptErr
                }

                // Check if the transaction is failed
                if receipt.Status == types.ReceiptStatusFailed {
                    signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
                    sender, signerErr := signer.Sender(tx)
                    if signerErr != nil {
                        log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
                        return nil, nil, nil, signerErr
                    }

                    l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
                        L2TxHash: tx.Hash().String(),
                        MessageType: int(orm.MessageTypeL2SentMessage),
                        Sender: sender.String(),
                        Receiver: (*tx.To()).String(),
                        L2BlockNumber: receipt.BlockNumber.Uint64(),
                        BlockTimestamp: block.Time(),
                        TxStatus: int(orm.TxStatusTypeSentTxReverted),
                    })
                }
            }

            if tx.Type() == types.L1MessageTxType {
                receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
                if receiptErr != nil {
                    log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
                    return nil, nil, nil, receiptErr
                }

                // Check if the transaction is failed
                if receipt.Status == types.ReceiptStatusFailed {
                    l2RevertedRelayedMessageTxs = append(l2RevertedRelayedMessageTxs, &orm.CrossMessage{
                        MessageHash: common.BytesToHash(crypto.Keccak256(tx.AsL1MessageTx().Data)).String(),
                        L2TxHash: tx.Hash().String(),
                        TxStatus: int(orm.TxStatusTypeRelayTxReverted),
                        L2BlockNumber: receipt.BlockNumber.Uint64(),
                        MessageType: int(orm.MessageTypeL1SentMessage),
                    })
                }
            }
        }
    }
    return blockTimestampsMap, l2RevertedUserTxs, l2RevertedRelayedMessageTxs, nil
}

func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
    query := ethereum.FilterQuery{
        FromBlock: new(big.Int).SetUint64(from), // inclusive
        ToBlock: new(big.Int).SetUint64(to), // inclusive
        Addresses: f.addressList,
        Topics: make([][]common.Hash, 1),
    }
    query.Topics[0] = make([]common.Hash, 7)
    query.Topics[0][0] = backendabi.L2WithdrawETHSig
    query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
    query.Topics[0][2] = backendabi.L2WithdrawERC721Sig
    query.Topics[0][3] = backendabi.L2WithdrawERC1155Sig
    query.Topics[0][4] = backendabi.L2SentMessageEventSig
    query.Topics[0][5] = backendabi.L2RelayedMessageEventSig
    query.Topics[0][6] = backendabi.L2FailedRelayedMessageEventSig

    eventLogs, err := f.client.FilterLogs(ctx, query)
    if err != nil {
        log.Error("Failed to filter L2 event logs", "from", from, "to", to, "err", err)
        return nil, err
    }
    return eventLogs, nil
}
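The single topic-0 slot above acts as an OR filter over the seven bridge event signatures, so one FilterLogs call covers withdrawals, sent messages, and relay results. The signature constants live in the backendabi package (not shown in this diff); a sketch of how such a topic hash is typically derived, where the exact SentMessage parameter list is an assumption based on the fields the parser unpacks:

// Topic 0 of a log is keccak256 of the canonical event signature string.
var l2SentMessageEventSigSketch = crypto.Keccak256Hash(
    []byte("SentMessage(address,address,uint256,uint256,uint256,bytes)"),
)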

// L2Fetcher L2 fetcher
func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, *L2FilterResult, error) {
    log.Info("fetch and save L2 events", "from", from, "to", to)

    isReorg, reorgHeight, blockHash, blocks, getErr := f.getBlocksAndDetectReorg(ctx, from, to, lastBlockHash)
    if getErr != nil {
        log.Error("L2Fetcher getBlocksAndDetectReorg failed", "from", from, "to", to, "error", getErr)
        return false, 0, common.Hash{}, nil, getErr
    }

    if isReorg {
        return isReorg, reorgHeight, blockHash, nil, nil
    }

    blockTimestampsMap, revertedUserTxs, revertedRelayMsgs, routerErr := f.getRevertedTxs(ctx, from, to, blocks)
    if routerErr != nil {
        log.Error("L2Fetcher getRevertedTxs failed", "from", from, "to", to, "error", routerErr)
        return false, 0, common.Hash{}, nil, routerErr
    }

    eventLogs, err := f.l2FetcherLogs(ctx, from, to)
    if err != nil {
        log.Error("L2Fetcher l2FetcherLogs failed", "from", from, "to", to, "error", err)
        return false, 0, common.Hash{}, nil, err
    }

    l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(eventLogs, blockTimestampsMap)
    if err != nil {
        log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
        return false, 0, common.Hash{}, nil, err
    }

    res := L2FilterResult{
        WithdrawMessages: l2WithdrawMessages,
        RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
        OtherRevertedTxs: revertedUserTxs,
    }

    f.updateMetrics(res)

    return false, 0, blockHash, &res, nil
}

func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
    f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_gateway_router_transaction").Add(float64(len(res.OtherRevertedTxs)))

    for _, withdrawMessage := range res.WithdrawMessages {
        switch orm.TokenType(withdrawMessage.TokenType) {
        case orm.TokenTypeETH:
            f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_eth").Add(1)
        case orm.TokenTypeERC20:
            f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc20").Add(1)
        case orm.TokenTypeERC721:
            f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc721").Add(1)
        case orm.TokenTypeERC1155:
            f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc1155").Add(1)
        }
    }

    for _, relayedMessage := range res.RelayedMessages {
        switch orm.TxStatusType(relayedMessage.TxStatus) {
        case orm.TxStatusTypeRelayed:
            f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_relayed_message").Add(1)
        case orm.TxStatusTypeFailedRelayed:
            f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_relayed_message").Add(1)
        case orm.TxStatusTypeRelayTxReverted:
            f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_reverted_relayed_message_transaction").Add(1)
        }
    }
}
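L2Fetcher either reports a reorg (with the height and hash to resume from) or hands back a filter result to persist. A minimal sketch of a polling loop that drives it, in the same package with time additionally imported — the batch size, sleep interval, and saveL2Result helper are assumptions, not part of this diff:

// Sketch of a driver loop for L2FetcherLogic; saveL2Result is hypothetical.
func runL2Fetcher(ctx context.Context, f *L2FetcherLogic, client *ethclient.Client, start uint64, startHash common.Hash) error {
    from, lastHash := start, startHash
    const step = uint64(64) // assumed fetch batch size
    for {
        latest, err := client.BlockNumber(ctx)
        if err != nil {
            return err
        }
        if from > latest {
            time.Sleep(3 * time.Second) // assumed poll interval
            continue
        }
        to := from + step - 1
        if to > latest {
            to = latest
        }
        isReorg, resyncHeight, blockHash, res, err := f.L2Fetcher(ctx, from, to, lastHash)
        if err != nil {
            return err
        }
        if isReorg {
            from, lastHash = resyncHeight+1, blockHash // rewind and refetch
            continue
        }
        if err := saveL2Result(ctx, res); err != nil { // hypothetical persistence step
            return err
        }
        from, lastHash = to+1, blockHash
    }
}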
bridge-history-api/internal/orm/batch_event.go (new file, 141 lines)
@@ -0,0 +1,141 @@
package orm

import (
    "context"
    "fmt"
    "time"

    "gorm.io/gorm"
)

// BatchStatusType represents the type of batch status.
type BatchStatusType int

// Constants for BatchStatusType.
const (
    BatchStatusTypeUnknown BatchStatusType = iota
    BatchStatusTypeCommitted
    BatchStatusTypeReverted
    BatchStatusTypeFinalized
)

// UpdateStatusType represents whether the batch info has been updated in the message table.
type UpdateStatusType int

// Constants for UpdateStatusType.
const (
    UpdateStatusTypeUnupdated UpdateStatusType = iota
    UpdateStatusTypeUpdated
)

// BatchEvent represents a batch event.
type BatchEvent struct {
    db *gorm.DB `gorm:"column:-"`

    ID uint64 `json:"id" gorm:"column:id;primary_key"`
    L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
    BatchStatus int `json:"batch_status" gorm:"column:batch_status"`
    BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
    BatchHash string `json:"batch_hash" gorm:"column:batch_hash"`
    StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
    EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
    UpdateStatus int `json:"update_status" gorm:"column:update_status"`
    CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
    UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
    DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}

// TableName returns the table name for the BatchEvent model.
func (*BatchEvent) TableName() string {
    return "batch_event_v2"
}

// NewBatchEvent returns a new instance of BatchEvent.
func NewBatchEvent(db *gorm.DB) *BatchEvent {
    return &BatchEvent{db: db}
}

// GetBatchEventSyncedHeightInDB returns the maximum l1_block_number from the batch_event_v2 table.
func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64, error) {
    var batch BatchEvent
    db := c.db.WithContext(ctx)
    db = db.Model(&BatchEvent{})
    db = db.Order("l1_block_number desc")
    if err := db.First(&batch).Error; err != nil {
        if err == gorm.ErrRecordNotFound {
            return 0, nil
        }
        return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
    }
    return batch.L1BlockNumber, nil
}

// GetFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
    var batches []*BatchEvent
    db := c.db.WithContext(ctx)
    db = db.Model(&BatchEvent{})
    db = db.Where("end_block_number <= ?", blockHeight)
    db = db.Where("batch_status = ?", BatchStatusTypeFinalized)
    db = db.Where("update_status = ?", UpdateStatusTypeUnupdated)
    db = db.Order("batch_index asc")
    if err := db.Find(&batches).Error; err != nil {
        if err == gorm.ErrRecordNotFound {
            return nil, nil
        }
        return nil, fmt.Errorf("failed to get batches >= block height, error: %w", err)
    }
    return batches, nil
}

// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent, dbTX ...*gorm.DB) error {
    for _, l1BatchEvent := range l1BatchEvents {
        db := c.db
        if len(dbTX) > 0 && dbTX[0] != nil {
            db = dbTX[0]
        }
        db = db.WithContext(ctx)
        db = db.Model(&BatchEvent{})
        updateFields := make(map[string]interface{})
        switch BatchStatusType(l1BatchEvent.BatchStatus) {
        case BatchStatusTypeCommitted:
            if err := db.Create(l1BatchEvent).Error; err != nil {
                return fmt.Errorf("failed to insert batch event, error: %w", err)
            }
        case BatchStatusTypeFinalized:
            db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
            db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
            updateFields["batch_status"] = BatchStatusTypeFinalized
            if err := db.Updates(updateFields).Error; err != nil {
                return fmt.Errorf("failed to update batch event, error: %w", err)
            }
        case BatchStatusTypeReverted:
            db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
            db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
            updateFields["batch_status"] = BatchStatusTypeReverted
            if err := db.Updates(updateFields).Error; err != nil {
                return fmt.Errorf("failed to update batch event, error: %w", err)
            }
            // Soft delete the batch event.
            if err := db.Delete(l1BatchEvent).Error; err != nil {
                return fmt.Errorf("failed to soft delete batch event, error: %w", err)
            }
        }
    }
    return nil
}

// UpdateBatchEventStatus updates the UpdateStatusType of a BatchEvent given its batch index.
func (c *BatchEvent) UpdateBatchEventStatus(ctx context.Context, batchIndex uint64) error {
    db := c.db.WithContext(ctx)
    db = db.Model(&BatchEvent{})
    db = db.Where("batch_index = ?", batchIndex)
    updateFields := map[string]interface{}{
        "update_status": UpdateStatusTypeUpdated,
    }
    if err := db.Updates(updateFields).Error; err != nil {
        return fmt.Errorf("failed to update batch event status, batchIndex: %d, error: %w", batchIndex, err)
    }
    return nil
}
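GetFinalizedBatchesLEBlockHeight and UpdateBatchEventStatus are meant to be driven together with the cross-message table defined next in this diff: pick up finalized batches whose withdrawals have not yet been stamped, mark the covered block range, then flag the batch as updated. A sketch of that loop — the function name is invented, and the real service would likely wrap the two updates in one DB transaction:

func updateFinalizedBatches(ctx context.Context, batchOrm *BatchEvent, msgOrm *CrossMessage, l2SyncedHeight uint64) error {
    batches, err := batchOrm.GetFinalizedBatchesLEBlockHeight(ctx, l2SyncedHeight)
    if err != nil {
        return err
    }
    for _, batch := range batches {
        // Stamp batch index and finalized rollup status onto the covered withdrawals.
        if err := msgOrm.UpdateBatchStatusOfL2Withdrawals(ctx, batch.StartBlockNumber, batch.EndBlockNumber, batch.BatchIndex); err != nil {
            return err
        }
        // Mark the batch as consumed so it is not returned again.
        if err := batchOrm.UpdateBatchEventStatus(ctx, batch.BatchIndex); err != nil {
            return err
        }
    }
    return nil
}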
bridge-history-api/internal/orm/cross_message.go (new file, 545 lines)
@@ -0,0 +1,545 @@
package orm

import (
    "context"
    "fmt"
    "time"

    "github.com/google/uuid"
    "github.com/scroll-tech/go-ethereum/common"
    "gorm.io/gorm"
    "gorm.io/gorm/clause"
)

// TokenType represents the type of token.
type TokenType int

// Constants for TokenType.
const (
    TokenTypeUnknown TokenType = iota
    TokenTypeETH
    TokenTypeERC20
    TokenTypeERC721
    TokenTypeERC1155
)

// MessageType represents the type of message.
type MessageType int

// Constants for MessageType.
const (
    MessageTypeUnknown MessageType = iota
    MessageTypeL1SentMessage
    MessageTypeL2SentMessage
)

// TxStatusType represents the status of a transaction.
type TxStatusType int

// Constants for TxStatusType.
const (
    // TxStatusTypeSent is one of the initial statuses for cross-chain messages.
    // It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
    // from a later status (e.g., relayed) back to "sent".
    // Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
    // 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
    // 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
    TxStatusTypeSent TxStatusType = iota
    TxStatusTypeSentTxReverted // Not track message hash, thus will not be processed again anymore.
    TxStatusTypeRelayed // Terminal status.
    // FailedRelayedMessage event: encoded tx failed, cannot retry. e.g., https://sepolia.scrollscan.com/tx/0xfc7d3ea5ec8dc9b664a5a886c3b33d21e665355057601033481a439498efb79a
    TxStatusTypeFailedRelayed // Terminal status.
    // In some cases, user can retry with a larger gas limit. e.g., https://sepolia.scrollscan.com/tx/0x7323a7ba29492cb47d92206411be99b27896f2823cee0633a596b646b73f1b5b
    TxStatusTypeRelayTxReverted
    TxStatusTypeSkipped
    TxStatusTypeDropped // Terminal status.
)
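The comment on TxStatusTypeSent describes an ordering problem: an update must never move a message out of a terminal state, and the upsert queries later in this file enforce that with WHERE clauses on tx_status. The same invariant as a small helper (a sketch, not part of the diff):

// isTerminalTxStatus mirrors the statuses the updates below refuse to overwrite.
func isTerminalTxStatus(s TxStatusType) bool {
    switch s {
    case TxStatusTypeRelayed, TxStatusTypeFailedRelayed, TxStatusTypeDropped:
        return true
    default:
        return false
    }
}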

// RollupStatusType represents the status of a rollup.
type RollupStatusType int

// Constants for RollupStatusType.
const (
    RollupStatusTypeUnknown RollupStatusType = iota
    RollupStatusTypeFinalized // only batch finalized status is used.
)

// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int

// Constants for MessageQueueEventType.
const (
    MessageQueueEventTypeUnknown MessageQueueEventType = iota
    MessageQueueEventTypeQueueTransaction
    MessageQueueEventTypeDequeueTransaction
    MessageQueueEventTypeDropTransaction
)

// MessageQueueEvent struct represents the details of a message queue event.
type MessageQueueEvent struct {
    EventType MessageQueueEventType
    QueueIndex uint64

    // Track replay tx hash and refund tx hash.
    TxHash common.Hash

    // QueueTransaction only in replayMessage, to track which message is replayed.
    MessageHash common.Hash
}

// CrossMessage represents a cross message.
type CrossMessage struct {
    db *gorm.DB `gorm:"column:-"`

    ID uint64 `json:"id" gorm:"column:id;primary_key"`
    MessageType int `json:"message_type" gorm:"column:message_type"`
    RollupStatus int `json:"rollup_status" gorm:"column:rollup_status"`
    TxStatus int `json:"tx_status" gorm:"column:tx_status"`
    TokenType int `json:"token_type" gorm:"column:token_type"`
    Sender string `json:"sender" gorm:"column:sender"`
    Receiver string `json:"receiver" gorm:"column:receiver"`
    MessageHash string `json:"message_hash" gorm:"column:message_hash"`
    L1TxHash string `json:"l1_tx_hash" gorm:"column:l1_tx_hash"` // initial tx hash, if MessageType is MessageTypeL1SentMessage.
    L1ReplayTxHash string `json:"l1_replay_tx_hash" gorm:"column:l1_replay_tx_hash"`
    L1RefundTxHash string `json:"l1_refund_tx_hash" gorm:"column:l1_refund_tx_hash"`
    L2TxHash string `json:"l2_tx_hash" gorm:"column:l2_tx_hash"` // initial tx hash, if MessageType is MessageTypeL2SentMessage.
    L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
    L2BlockNumber uint64 `json:"l2_block_number" gorm:"column:l2_block_number"`
    L1TokenAddress string `json:"l1_token_address" gorm:"column:l1_token_address"`
    L2TokenAddress string `json:"l2_token_address" gorm:"column:l2_token_address"`
    TokenIDs string `json:"token_ids" gorm:"column:token_ids"`
    TokenAmounts string `json:"token_amounts" gorm:"column:token_amounts"`
    BlockTimestamp uint64 `json:"block_timestamp" gorm:"column:block_timestamp"`
    MessageFrom string `json:"message_from" gorm:"column:message_from"`
    MessageTo string `json:"message_to" gorm:"column:message_to"`
    MessageValue string `json:"message_value" gorm:"column:message_value"`
    MessageNonce uint64 `json:"message_nonce" gorm:"column:message_nonce"`
    MessageData string `json:"message_data" gorm:"column:message_data"`
    MerkleProof []byte `json:"merkle_proof" gorm:"column:merkle_proof"`
    BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
    CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
    UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
    DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}

// TableName returns the table name for the CrossMessage model.
func (*CrossMessage) TableName() string {
    return "cross_message_v2"
}

// NewCrossMessage returns a new instance of CrossMessage.
func NewCrossMessage(db *gorm.DB) *CrossMessage {
    return &CrossMessage{db: db}
}
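A single cross_message_v2 row is filled in from both chains: the L1 fetcher inserts the deposit keyed by its message hash, and the L2 fetcher later upserts the relay outcome onto the same row. A sketch of that flow using the insert methods defined later in this file (hash and block values are placeholders):

deposit := &CrossMessage{
    MessageHash:   "0xaaaa...placeholder",
    MessageType:   int(MessageTypeL1SentMessage),
    TxStatus:      int(TxStatusTypeSent),
    L1TxHash:      "0x1111...placeholder",
    L1BlockNumber: 100,
}
relayResult := &CrossMessage{
    MessageHash:   "0xaaaa...placeholder", // same key as the deposit
    MessageType:   int(MessageTypeL1SentMessage),
    TxStatus:      int(TxStatusTypeRelayed),
    L2TxHash:      "0x2222...placeholder",
    L2BlockNumber: 42,
}
msgOrm := NewCrossMessage(db)
_ = msgOrm.InsertOrUpdateL1Messages(ctx, []*CrossMessage{deposit})
_ = msgOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, []*CrossMessage{relayResult})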
|
||||
|
||||
// GetMessageSyncedHeightInDB returns the latest synced cross message height from the database for a given message type.
|
||||
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType MessageType) (uint64, error) {
|
||||
var message CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", messageType)
|
||||
switch {
|
||||
case messageType == MessageTypeL1SentMessage:
|
||||
db = db.Order("l1_block_number desc")
|
||||
case messageType == MessageTypeL2SentMessage:
|
||||
db = db.Order("l2_block_number desc")
|
||||
}
|
||||
if err := db.First(&message).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
|
||||
}
|
||||
switch {
|
||||
case messageType == MessageTypeL1SentMessage:
|
||||
return message.L1BlockNumber, nil
|
||||
case messageType == MessageTypeL2SentMessage:
|
||||
return message.L2BlockNumber, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid message type: %v", messageType)
|
||||
}
|
||||
}
|
||||
|
||||
// GetL2LatestFinalizedWithdrawal returns the latest finalized L2 withdrawal from the database.
|
||||
func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*CrossMessage, error) {
|
||||
var message CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("rollup_status = ?", RollupStatusTypeFinalized)
|
||||
db = db.Order("message_nonce desc")
|
||||
if err := db.First(&message).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
|
||||
}
|
||||
return &message, nil
|
||||
}
|
||||
|
||||
// GetL2WithdrawalsByBlockRange returns the L2 withdrawals by block range from the database.
|
||||
func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBlock, endBlock uint64) ([]*CrossMessage, error) {
|
||||
var messages []*CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("l2_block_number >= ?", startBlock)
|
||||
db = db.Where("l2_block_number <= ?", endBlock)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeSentTxReverted)
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Order("message_nonce asc")
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// GetMessagesByTxHashes retrieves all cross messages from the database that match the provided transaction hashes.
|
||||
func (c *CrossMessage) GetMessagesByTxHashes(ctx context.Context, txHashes []string) ([]*CrossMessage, error) {
|
||||
var messages []*CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("l1_tx_hash in (?) or l2_tx_hash in (?)", txHashes, txHashes)
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to get L2 messages by tx hashes, tx hashes: %v, error: %w", txHashes, err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// GetL2UnclaimedWithdrawalsByAddress retrieves all L2 unclaimed withdrawal messages for a given sender address.
|
||||
func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
|
||||
var messages []*CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("tx_status = ?", TxStatusTypeSent)
|
||||
db = db.Where("sender = ?", sender)
|
||||
db = db.Order("block_timestamp desc")
|
||||
db = db.Limit(500)
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// GetL2WithdrawalsByAddress retrieves all L2 claimable withdrawal messages for a given sender address.
|
||||
func (c *CrossMessage) GetL2WithdrawalsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
|
||||
var messages []*CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("sender = ?", sender)
|
||||
db = db.Order("block_timestamp desc")
|
||||
db = db.Limit(500)
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to get L2 withdrawal messages by sender address, sender: %v, error: %w", sender, err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// GetTxsByAddress retrieves all txs for a given sender address.
|
||||
func (c *CrossMessage) GetTxsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
|
||||
var messages []*CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("sender = ?", sender)
|
||||
db = db.Order("block_timestamp desc")
|
||||
db = db.Limit(500)
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to get all txs by sender address, sender: %v, error: %w", sender, err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// UpdateL1MessageQueueEventsInfo updates the information about L1 message queue events in the database.
|
||||
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent, dbTX ...*gorm.DB) error {
|
||||
// update tx statuses.
|
||||
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeFailedRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
txStatusUpdateFields := make(map[string]interface{})
|
||||
switch l1MessageQueueEvent.EventType {
|
||||
case MessageQueueEventTypeQueueTransaction:
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sentMessages have been filtered out.
|
||||
// replayMessage case:
|
||||
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
|
||||
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
|
||||
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
|
||||
//
|
||||
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
|
||||
// because in replayMessage, queue index != message nonce.
|
||||
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
|
||||
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeSent // reset status to "sent".
|
||||
case MessageQueueEventTypeDequeueTransaction:
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
|
||||
case MessageQueueEventTypeDropTransaction:
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
|
||||
}
|
||||
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
|
||||
}
|
||||
}
|
||||
|
||||
// update tx hashes of replay and refund.
|
||||
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
txHashUpdateFields := make(map[string]interface{})
|
||||
switch l1MessageQueueEvent.EventType {
|
||||
case MessageQueueEventTypeQueueTransaction:
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sentMessages have been filtered out.
|
||||
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
|
||||
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
|
||||
case MessageQueueEventTypeDropTransaction:
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
|
||||
}
|
||||
// Check if there are fields to update to avoid empty update operation (skip message).
|
||||
if len(txHashUpdateFields) > 0 {
|
||||
if err := db.Updates(txHashUpdateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBatchStatusOfL2Withdrawals updates batch status of L2 withdrawals.
|
||||
func (c *CrossMessage) UpdateBatchStatusOfL2Withdrawals(ctx context.Context, startBlockNumber, endBlockNumber, batchIndex uint64) error {
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("l2_block_number >= ?", startBlockNumber)
|
||||
db = db.Where("l2_block_number <= ?", endBlockNumber)
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["batch_index"] = batchIndex
|
||||
updateFields["rollup_status"] = RollupStatusTypeFinalized
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update batch status of L2 sent messages, start: %v, end: %v, index: %v, error: %w", startBlockNumber, endBlockNumber, batchIndex, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBatchIndexRollupStatusMerkleProofOfL2Messages updates the batch_index, rollup_status, and merkle_proof fields for a list of L2 cross messages.
|
||||
func (c *CrossMessage) UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, message := range messages {
|
||||
updateFields := map[string]interface{}{
|
||||
"batch_index": message.BatchIndex,
|
||||
"rollup_status": message.RollupStatus,
|
||||
"merkle_proof": message.MerkleProof,
|
||||
}
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_hash = ?", message.MessageHash)
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update L2 message with message_hash %s, error: %w", message.MessageHash, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertOrUpdateL1Messages inserts or updates a list of L1 cross messages into the database.
|
||||
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l1_block_number", "l1_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_nonce"}),
|
||||
})
|
||||
if err := db.Create(messages).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert message, error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertOrUpdateL2Messages inserts or updates a list of L2 cross messages into the database.
|
||||
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
|
||||
// The merkle_proof is updated separately in batch status updates and hence is not included here.
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l2_block_number", "l2_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_from", "message_to", "message_value", "message_data", "message_nonce"}),
|
||||
})
|
||||
if err := db.Create(messages).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert message, error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertFailedGatewayRouterTxs inserts a list of transactions that failed to interact with the gateway router into the database.
|
||||
// These failed transactions are only fetched once, so they are inserted without checking for duplicates.
|
||||
// To resolve unique index confliction, a random UUID will be generated and used as the MessageHash.
|
||||
func (c *CrossMessage) InsertFailedGatewayRouterTxs(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
for _, message := range messages {
|
||||
message.MessageHash = uuid.New().String()
|
||||
}
|
||||
if err := db.Create(messages).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertOrUpdateL2RelayedMessagesOfL1Deposits inserts or updates the database with a list of L2 relayed messages related to L1 deposits.
|
||||
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
if len(l2RelayedMessages) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Deduplicate messages, for each message_hash, retaining message with the highest block number.
|
||||
// This is necessary as a single message, like a FailedRelayedMessage or a reverted relayed transaction,
|
||||
// may be relayed multiple times within certain block ranges, potentially leading to the error:
|
||||
// "ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time (SQLSTATE 21000)".
|
||||
// This happens if we attempt to insert multiple records with the same message_hash in a single db.Create operation.
|
||||
// For example, see these transactions where the same message was relayed twice within certain block ranges:
|
||||
// Reverted tx 1: https://sepolia.scrollscan.com/tx/0xcd6979277c3bc747445273a5e58ef1e9692fbe101d88cfefbbb69d3aef3193c0
|
||||
// Reverted tx 2: https://sepolia.scrollscan.com/tx/0x43e28ed7cb71107c18c5d8ebbdb4a1d9cac73e60391d14d41e92985028faa337
|
||||
// Another example:
|
||||
// FailedRelayedMessage 1: https://sepolia.scrollscan.com/tx/0xfadb147fb211e5096446c5cac3ae0a8a705d2ece6c47c65135c8874f84638f17
|
||||
// FailedRelayedMessage 2: https://sepolia.scrollscan.com/tx/0x6cb149b61afd07bf2e17561a59ebebde41e343b6610290c97515b2f862160b42
|
||||
mergedL2RelayedMessages := make(map[string]*CrossMessage)
|
||||
for _, message := range l2RelayedMessages {
|
||||
if existing, found := mergedL2RelayedMessages[message.MessageHash]; found {
|
||||
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
|
||||
mergedL2RelayedMessages[message.MessageHash] = message
|
||||
}
|
||||
} else {
|
||||
mergedL2RelayedMessages[message.MessageHash] = message
|
||||
}
|
||||
}
|
||||
uniqueL2RelayedMessages := make([]*CrossMessage, 0, len(mergedL2RelayedMessages))
|
||||
for _, msg := range mergedL2RelayedMessages {
|
||||
uniqueL2RelayedMessages = append(uniqueL2RelayedMessages, msg)
|
||||
}
|
||||
// Do not update tx status of successfully or failed relayed messages,
|
||||
// because if a message is handled, the later relayed message tx would be reverted.
|
||||
// ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L2/L2ScrollMessenger.sol#L102
|
||||
// e.g.,
|
||||
// Successfully relayed: https://sepolia.scrollscan.com/tx/0x4eb7cb07ba76956259c0079819a34a146f8a93dd891dc94812e9b3d66b056ec7#eventlog
|
||||
// Reverted tx 1 (Reason: Message was already successfully executed): https://sepolia.scrollscan.com/tx/0x1973cafa14eb40734df30da7bfd4d9aceb53f8f26e09d96198c16d0e2e4a95fd
|
||||
// Reverted tx 2 (Reason: Message was already successfully executed): https://sepolia.scrollscan.com/tx/0x02fc3a28684a590aead2482022f56281539085bd3d273ac8dedc1ceccb2bc554
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{"message_type", "l2_block_number", "l2_tx_hash", "tx_status"}),
|
||||
Where: clause.Where{
|
||||
Exprs: []clause.Expression{
|
||||
clause.And(
|
||||
// do not over-write terminal statuses.
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
if err := db.Create(uniqueL2RelayedMessages).Error; err != nil {
|
||||
return fmt.Errorf("failed to update L2 reverted relayed message of L1 deposit, error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertOrUpdateL1RelayedMessagesOfL2Withdrawals inserts or updates the database with a list of L1 relayed messages related to L2 withdrawals.
|
||||
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
if len(l1RelayedMessages) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Deduplicate messages, for each message_hash, retaining message with the highest block number.
|
||||
// This is necessary as a single message, like a FailedRelayedMessage or a reverted relayed transaction,
|
||||
// may be relayed multiple times within certain block ranges, potentially leading to the error:
|
||||
// "ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time (SQLSTATE 21000)".
|
||||
// This happens if we attempt to insert multiple records with the same message_hash in a single db.Create operation.
|
||||
// For example, see these transactions where the same message was relayed twice within certain block ranges:
|
||||
// FailedRelayedMessage 1: https://sepolia.etherscan.io/tx/0x28b3212cda6ca0f3790f362a780257bbe2b37417ccf75a4eca6c3a08294c8f1b#eventlog
|
||||
// FailedRelayedMessage 2: https://sepolia.etherscan.io/tx/0xc8a8254825dd2cab5caef58cfd8d88c077ceadadc78f2340214a86cf8ab88543#eventlog
|
||||
// Another example (relayed success, then relayed again):
|
||||
// Relay Message, and success: https://sepolia.etherscan.io/tx/0xcfdf2f5446719e3e123a8aa06e4d6b3809c3850a13adf875755c8b1e423aa448#eventlog
|
||||
// Relay Message again, and reverted: https://sepolia.etherscan.io/tx/0xb1fcae7546f3de4cfd0b4d679f4075adb4eb69578b12e2b5673f5f24b1836578
|
||||
mergedL1RelayedMessages := make(map[string]*CrossMessage)
|
||||
for _, message := range l1RelayedMessages {
|
||||
if existing, found := mergedL1RelayedMessages[message.MessageHash]; found {
|
||||
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
|
||||
mergedL1RelayedMessages[message.MessageHash] = message
|
||||
}
|
||||
} else {
|
||||
mergedL1RelayedMessages[message.MessageHash] = message
|
||||
}
|
||||
}
|
||||
uniqueL1RelayedMessages := make([]*CrossMessage, 0, len(mergedL1RelayedMessages))
|
||||
for _, msg := range mergedL1RelayedMessages {
|
||||
uniqueL1RelayedMessages = append(uniqueL1RelayedMessages, msg)
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{"message_type", "l1_block_number", "l1_tx_hash", "tx_status"}),
|
||||
Where: clause.Where{
|
||||
Exprs: []clause.Expression{
|
||||
clause.And(
|
||||
// do not over-write terminal statuses.
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
if err := db.Create(uniqueL1RelayedMessages).Error; err != nil {
|
||||
return fmt.Errorf("failed to update L1 relayed message of L2 withdrawal, error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
@@ -18,7 +18,7 @@ const MigrationsDir string = "migrations"
 func init() {
     goose.SetBaseFS(embedMigrations)
     goose.SetSequential(true)
-    goose.SetTableName("bridge_history_migrations")
+    goose.SetTableName("bridge_historyv2_migrations")
 
     verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
     goose.SetVerbose(verbose)
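Renaming the goose table means the v2 service keeps its own migration bookkeeping, separate from the old bridge_history_migrations table. A hedged sketch of how these embedded migrations are typically applied — the wrapper function is an assumption, while goose.Up and gorm's DB() are standard APIs:

// Applies the embedded SQL files; goose records progress in the
// "bridge_historyv2_migrations" table configured in init() above.
func runMigrations(gormDB *gorm.DB) error {
    sqlDB, err := gormDB.DB()
    if err != nil {
        return err
    }
    return goose.Up(sqlDB, MigrationsDir)
}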
@@ -0,0 +1,57 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE cross_message_v2
(
    id BIGSERIAL PRIMARY KEY,
    message_type SMALLINT NOT NULL,
    tx_status SMALLINT NOT NULL,
    rollup_status SMALLINT NOT NULL,
    token_type SMALLINT NOT NULL,
    sender VARCHAR NOT NULL,
    receiver VARCHAR NOT NULL,

    message_hash VARCHAR DEFAULT NULL, -- NULL for failed txs
    l1_tx_hash VARCHAR DEFAULT NULL,
    l1_replay_tx_hash VARCHAR DEFAULT NULL,
    l1_refund_tx_hash VARCHAR DEFAULT NULL,
    l2_tx_hash VARCHAR DEFAULT NULL,
    l1_block_number BIGINT DEFAULT NULL,
    l2_block_number BIGINT DEFAULT NULL,
    l1_token_address VARCHAR DEFAULT NULL,
    l2_token_address VARCHAR DEFAULT NULL,
    token_ids VARCHAR DEFAULT NULL,
    token_amounts VARCHAR NOT NULL,
    block_timestamp BIGINT NOT NULL, -- timestamp to sort L1 Deposit & L2 Withdraw events altogether

    --- claim info
    message_from VARCHAR DEFAULT NULL,
    message_to VARCHAR DEFAULT NULL,
    message_value VARCHAR DEFAULT NULL,
    message_nonce BIGINT DEFAULT NULL,
    message_data VARCHAR DEFAULT NULL,
    merkle_proof BYTEA DEFAULT NULL,
    batch_index BIGINT DEFAULT NULL,

    -- metadata
    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP(0) DEFAULT NULL
);

CREATE UNIQUE INDEX IF NOT EXISTS idx_cm_message_hash ON cross_message_v2 (message_hash);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_l1_block_number ON cross_message_v2 (message_type, l1_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_l2_block_number ON cross_message_v2 (message_type, l2_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_rollup_status_message_nonce ON cross_message_v2 (message_type, rollup_status, message_nonce DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_message_nonce_tx_status_l2_block_number ON cross_message_v2 (message_type, message_nonce, tx_status, l2_block_number);
CREATE INDEX IF NOT EXISTS idx_cm_l1_tx_hash ON cross_message_v2 (l1_tx_hash);
CREATE INDEX IF NOT EXISTS idx_cm_l2_tx_hash ON cross_message_v2 (l2_tx_hash);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_tx_status_sender_block_timestamp ON cross_message_v2 (message_type, tx_status, sender, block_timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_sender_block_timestamp ON cross_message_v2 (message_type, sender, block_timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_cm_sender_block_timestamp ON cross_message_v2 (sender, block_timestamp DESC);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS cross_message_v2;
-- +goose StatementEnd
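The unique index on message_hash is what the ORM's ON CONFLICT upserts key on, and the composite indexes mirror the query shapes used by the ORM (for example message_type + tx_status + sender + block_timestamp for unclaimed withdrawals). A sketch of the upsert shape this unique index enables, in the same gorm clause style used elsewhere in this diff (the column list is illustrative only):

db.Model(&orm.CrossMessage{}).
    Clauses(clause.OnConflict{
        Columns:   []clause.Column{{Name: "message_hash"}},
        DoUpdates: clause.AssignmentColumns([]string{"l2_block_number", "l2_tx_hash", "tx_status"}),
    }).
    Create(messages)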
@@ -0,0 +1,28 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE batch_event_v2
(
    id BIGSERIAL PRIMARY KEY,
    l1_block_number BIGINT NOT NULL,
    batch_status SMALLINT NOT NULL,
    batch_index BIGINT NOT NULL,
    batch_hash VARCHAR NOT NULL,
    start_block_number BIGINT NOT NULL,
    end_block_number BIGINT NOT NULL,
    update_status SMALLINT NOT NULL,
    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP(0) DEFAULT NULL
);

CREATE INDEX IF NOT EXISTS idx_be_l1_block_number ON batch_event_v2 (l1_block_number);
CREATE INDEX IF NOT EXISTS idx_be_batch_index ON batch_event_v2 (batch_index);
CREATE INDEX IF NOT EXISTS idx_be_batch_index_batch_hash ON batch_event_v2 (batch_index, batch_hash);
CREATE INDEX IF NOT EXISTS idx_be_end_block_number_update_status_batch_status_batch_index ON batch_event_v2 (end_block_number, update_status, batch_status, batch_index);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS batch_event_v2;
-- +goose StatementEnd
@@ -5,26 +5,31 @@ import (
 
 	"github.com/gin-contrib/cors"
 	"github.com/gin-gonic/gin"
+	"github.com/prometheus/client_golang/prometheus"
 
-	"bridge-history-api/config"
-	"bridge-history-api/internal/controller"
+	"scroll-tech/common/observability"
+
+	"scroll-tech/bridge-history-api/internal/config"
+	"scroll-tech/bridge-history-api/internal/controller/api"
 )
 
 // Route routes the APIs
-func Route(router *gin.Engine, conf *config.Config) {
+func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
 	router.Use(cors.New(cors.Config{
 		AllowOrigins:     []string{"*"},
-		AllowMethods:     []string{"GET", "POST", "PUT", "DELETE"},
+		AllowMethods:     []string{"GET", "POST"},
 		AllowHeaders:     []string{"Origin", "Content-Type", "Authorization"},
 		AllowCredentials: true,
 		MaxAge:           12 * time.Hour,
 	}))
 
+	observability.Use(router, "bridge_history_api", reg)
+
 	r := router.Group("api/")
-	r.GET("/txs", controller.HistoryCtrler.GetAllTxsByAddr)
-	r.POST("/txsbyhashes", controller.HistoryCtrler.PostQueryTxsByHash)
-	r.GET("/claimable", controller.HistoryCtrler.GetAllClaimableTxsByAddr)
-	r.GET("/withdraw_root", controller.BatchCtrler.GetWithdrawRootByBatchIndex)
-	r.GET("/health", controller.HealthCheck.HealthCheck)
-	r.GET("/ready", controller.Ready.Ready)
+
+	r.GET("/txs", api.HistoryCtrler.GetTxsByAddress)
+	r.GET("/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
+	r.GET("/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
+
+	r.POST("/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
 }
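Route now takes a prometheus.Registerer and defers metrics and probe endpoints to the shared observability middleware instead of registering them per route. A sketch of the caller-side wiring (the package alias, port, and error handling are assumptions):

// Assumes the file above lives in a package imported as "route".
router := gin.New()
route.Route(router, conf, prometheus.DefaultRegisterer)
if err := router.Run(":8080"); err != nil { // placeholder port
    log.Crit("run bridge-history-api server failed", "err", err)
}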
@@ -1,132 +0,0 @@
package types

import (
    "net/http"
    "time"

    "github.com/gin-gonic/gin"
)

const (
    // Success shows OK.
    Success = 0
    // InternalServerError shows a fatal error in the server
    InternalServerError = 500
    // ErrParameterInvalidNo is invalid params
    ErrParameterInvalidNo = 40001
    // ErrGetClaimablesFailure is getting all claimables txs error
    ErrGetClaimablesFailure = 40002
    // ErrGetTxsByHashFailure is getting txs by hash list error
    ErrGetTxsByHashFailure = 40003
    // ErrGetTxsByAddrFailure is getting txs by address error
    ErrGetTxsByAddrFailure = 40004
    // ErrGetWithdrawRootByBatchIndexFailure is getting withdraw root by batch index error
    ErrGetWithdrawRootByBatchIndexFailure = 40005
)

// QueryByAddressRequest the request parameter of address api
type QueryByAddressRequest struct {
    Address string `form:"address" binding:"required"`
    Page int `form:"page" binding:"required"`
    PageSize int `form:"page_size" binding:"required"`
}

// QueryByHashRequest the request parameter of hash api
type QueryByHashRequest struct {
    Txs []string `raw:"txs" binding:"required"`
}

// QueryByBatchIndexRequest the request parameter of batch index api
type QueryByBatchIndexRequest struct {
    // BatchIndex can not be 0, because we dont decode the genesis block
    BatchIndex uint64 `form:"batch_index" binding:"required"`
}

// ResultData contains return txs and total
type ResultData struct {
    Result []*TxHistoryInfo `json:"result"`
    Total uint64 `json:"total"`
}

// Response the response schema
type Response struct {
    ErrCode int `json:"errcode"`
    ErrMsg string `json:"errmsg"`
    Data interface{} `json:"data"`
}

// Finalized the schema of tx finalized infos
type Finalized struct {
    Hash string `json:"hash"`
    Amount string `json:"amount"`
    To string `json:"to"` // useless
    IsL1 bool `json:"isL1"`
    BlockNumber uint64 `json:"blockNumber"`
    BlockTimestamp *time.Time `json:"blockTimestamp"` // uselesss
}

// UserClaimInfo the schema of tx claim infos
type UserClaimInfo struct {
    From string `json:"from"`
    To string `json:"to"`
    Value string `json:"value"`
    Nonce string `json:"nonce"`
    BatchHash string `json:"batch_hash"`
    Message string `json:"message"`
    Proof string `json:"proof"`
    BatchIndex string `json:"batch_index"`
}

// TxHistoryInfo the schema of tx history infos
type TxHistoryInfo struct {
    Hash string `json:"hash"`
    Amount string `json:"amount"`
    To string `json:"to"` // useless
    IsL1 bool `json:"isL1"`
    L1Token string `json:"l1Token"`
    L2Token string `json:"l2Token"`
    BlockNumber uint64 `json:"blockNumber"`
    BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
    FinalizeTx *Finalized `json:"finalizeTx"`
    ClaimInfo *UserClaimInfo `json:"claimInfo"`
    CreatedAt *time.Time `json:"createdTime"`
}

// RenderJSON renders response with json
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
    var errMsg string
    if err != nil {
        errMsg = err.Error()
    }
    renderData := Response{
        ErrCode: errCode,
        ErrMsg: errMsg,
        Data: data,
    }
    ctx.JSON(http.StatusOK, renderData)
}

// RenderSuccess renders success response with json
func RenderSuccess(ctx *gin.Context, data interface{}) {
    RenderJSON(ctx, Success, nil, data)
}

// RenderFailure renders failure response with json
func RenderFailure(ctx *gin.Context, errCode int, err error) {
    RenderJSON(ctx, errCode, err, nil)
}

// RenderFatal renders fatal response with json
func RenderFatal(ctx *gin.Context, err error) {
    var errMsg string
    if err != nil {
        errMsg = err.Error()
    }
    renderData := Response{
        ErrCode: InternalServerError,
        ErrMsg: errMsg,
        Data: nil,
    }
    ctx.Set("errcode", InternalServerError)
    ctx.JSON(http.StatusInternalServerError, renderData)
}
bridge-history-api/internal/types/types.go (new file, 132 lines)
@@ -0,0 +1,132 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
)
|
||||
|
||||
const (
|
||||
// Success indicates that the operation was successful.
|
||||
Success = 0
|
||||
// InternalServerError represents a fatal error occurring on the server.
|
||||
InternalServerError = 500
|
||||
// ErrParameterInvalidNo represents an error when the parameters are invalid.
|
||||
ErrParameterInvalidNo = 40001
|
||||
// ErrGetL2ClaimableWithdrawalsError represents an error when trying to get L2 claimable withdrawal transactions.
|
||||
ErrGetL2ClaimableWithdrawalsError = 40002
|
||||
// ErrGetL2WithdrawalsError represents an error when trying to get L2 withdrawal transactions by address.
|
||||
ErrGetL2WithdrawalsError = 40003
|
||||
// ErrGetTxsError represents an error when trying to get transactions by address.
|
||||
ErrGetTxsError = 40004
|
||||
// ErrGetTxsByHashError represents an error when trying to get transactions by hash list.
|
||||
ErrGetTxsByHashError = 40005
|
||||
)
|
||||
|
||||
// QueryByAddressRequest is the request parameter of the address API
|
||||
type QueryByAddressRequest struct {
|
||||
Address string `form:"address" binding:"required"`
|
||||
Page uint64 `form:"page" binding:"required,min=1"`
|
||||
PageSize uint64 `form:"page_size" binding:"required,min=1,max=100"`
|
||||
}
|
||||
|
||||
// QueryByHashRequest is the request parameter of the hash API
|
||||
type QueryByHashRequest struct {
|
||||
Txs []string `json:"txs" binding:"required,min=1,max=100"`
|
||||
}
|
||||
|
||||
// ResultData contains the returned txs and the total count
|
||||
type ResultData struct {
|
||||
Results []*TxHistoryInfo `json:"results"`
|
||||
Total uint64 `json:"total"`
|
||||
}
|
||||
|
||||
// Response is the response schema
|
||||
type Response struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
// CounterpartChainTx is the schema of counterpart chain tx info
|
||||
type CounterpartChainTx struct {
|
||||
Hash string `json:"hash"`
|
||||
BlockNumber uint64 `json:"block_number"`
|
||||
}
|
||||
|
||||
// ClaimInfo is the schema of tx claim info
|
||||
type ClaimInfo struct {
|
||||
From string `json:"from"`
|
||||
To string `json:"to"`
|
||||
Value string `json:"value"`
|
||||
Nonce string `json:"nonce"`
|
||||
Message string `json:"message"`
|
||||
Proof L2MessageProof `json:"proof"`
|
||||
Claimable bool `json:"claimable"`
|
||||
}
|
||||
|
||||
// L2MessageProof is the schema of L2 message proof
|
||||
type L2MessageProof struct {
|
||||
BatchIndex string `json:"batch_index"`
|
||||
MerkleProof string `json:"merkle_proof"`
|
||||
}
|
||||
|
||||
// TxHistoryInfo is the schema of tx history info
|
||||
type TxHistoryInfo struct {
|
||||
Hash string `json:"hash"`
|
||||
ReplayTxHash string `json:"replay_tx_hash"`
|
||||
RefundTxHash string `json:"refund_tx_hash"`
|
||||
MessageHash string `json:"message_hash"`
|
||||
TokenType orm.TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
|
||||
TokenIDs []string `json:"token_ids"` // only for erc721 and erc1155
|
||||
TokenAmounts []string `json:"token_amounts"` // for eth and erc20, the length is 1, for erc721 and erc1155, the length could be > 1
|
||||
MessageType orm.MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
|
||||
L1TokenAddress string `json:"l1_token_address"`
|
||||
L2TokenAddress string `json:"l2_token_address"`
|
||||
BlockNumber uint64 `json:"block_number"`
|
||||
TxStatus orm.TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
|
||||
CounterpartChainTx *CounterpartChainTx `json:"counterpart_chain_tx"`
|
||||
ClaimInfo *ClaimInfo `json:"claim_info"`
|
||||
BlockTimestamp uint64 `json:"block_timestamp"`
|
||||
}
|
||||
|
||||
// RenderJSON renders response with json
|
||||
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
|
||||
var errMsg string
|
||||
if err != nil {
|
||||
errMsg = err.Error()
|
||||
}
|
||||
renderData := Response{
|
||||
ErrCode: errCode,
|
||||
ErrMsg: errMsg,
|
||||
Data: data,
|
||||
}
|
||||
ctx.JSON(http.StatusOK, renderData)
|
||||
}
|
||||
|
||||
// RenderSuccess renders success response with json
|
||||
func RenderSuccess(ctx *gin.Context, data interface{}) {
|
||||
RenderJSON(ctx, Success, nil, data)
|
||||
}
|
||||
|
||||
// RenderFailure renders failure response with json
|
||||
func RenderFailure(ctx *gin.Context, errCode int, err error) {
|
||||
RenderJSON(ctx, errCode, err, nil)
|
||||
}
|
||||
|
||||
// RenderFatal renders fatal response with json
|
||||
func RenderFatal(ctx *gin.Context, err error) {
|
||||
var errMsg string
|
||||
if err != nil {
|
||||
errMsg = err.Error()
|
||||
}
|
||||
renderData := Response{
|
||||
ErrCode: InternalServerError,
|
||||
ErrMsg: errMsg,
|
||||
Data: nil,
|
||||
}
|
||||
ctx.Set("errcode", InternalServerError)
|
||||
ctx.JSON(http.StatusInternalServerError, renderData)
|
||||
}
|
||||
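The request structs, error codes, and Render helpers above are intended to be used together from gin handlers. Below is a minimal sketch of that wiring; the TxHistoryService interface and its GetTxsByAddress method are hypothetical stand-ins, not the repository's actual controller or service layer.

```go
package controller

import (
	"github.com/gin-gonic/gin"

	"scroll-tech/bridge-history-api/internal/types"
)

// TxHistoryService is an assumed interface; the real service layer may differ.
type TxHistoryService interface {
	GetTxsByAddress(address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error)
}

// GetTxsByAddressHandler binds the query parameters, delegates to the service,
// and renders the uniform Response envelope defined above.
func GetTxsByAddressHandler(svc TxHistoryService) gin.HandlerFunc {
	return func(ctx *gin.Context) {
		var req types.QueryByAddressRequest
		if err := ctx.ShouldBind(&req); err != nil {
			types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
			return
		}
		txs, total, err := svc.GetTxsByAddress(req.Address, req.Page, req.PageSize)
		if err != nil {
			types.RenderFailure(ctx, types.ErrGetTxsError, err)
			return
		}
		types.RenderSuccess(ctx, &types.ResultData{Results: txs, Total: total})
	}
}
```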
218
bridge-history-api/internal/utils/utils.go
Normal file
@@ -0,0 +1,218 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
)
|
||||
|
||||
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values
|
||||
func Keccak2(a common.Hash, b common.Hash) common.Hash {
|
||||
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
|
||||
}
|
||||
|
||||
// GetBlockNumber gets the current block number minus the number of confirmations
|
||||
func GetBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
|
||||
number, err := client.BlockNumber(ctx)
|
||||
if err != nil || number <= confirmations {
|
||||
return 0, err
|
||||
}
|
||||
number = number - confirmations
|
||||
return number, nil
|
||||
}
|
||||
|
||||
// UnpackLog unpacks a retrieved log into the provided output structure.
|
||||
// @todo: add unit test.
|
||||
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
if log.Topics[0] != c.Events[event].ID {
|
||||
return fmt.Errorf("event signature mismatch")
|
||||
}
|
||||
if len(log.Data) > 0 {
|
||||
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var indexed abi.Arguments
|
||||
for _, arg := range c.Events[event].Inputs {
|
||||
if arg.Indexed {
|
||||
indexed = append(indexed, arg)
|
||||
}
|
||||
}
|
||||
return abi.ParseTopics(out, indexed, log.Topics[1:])
|
||||
}
|
||||
|
||||
// ComputeMessageHash computes the message hash
|
||||
func ComputeMessageHash(
|
||||
sender common.Address,
|
||||
target common.Address,
|
||||
value *big.Int,
|
||||
messageNonce *big.Int,
|
||||
message []byte,
|
||||
) common.Hash {
|
||||
data, _ := backendabi.IL2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
|
||||
return common.BytesToHash(crypto.Keccak256(data))
|
||||
}
|
||||
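ComputeMessageHash packs the relayMessage arguments with the IL2ScrollMessenger ABI and hashes the result with keccak256. A minimal usage sketch with made-up addresses and nonce (illustrative only):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/bridge-history-api/internal/utils"
)

func main() {
	// All values below are hypothetical.
	sender := common.HexToAddress("0x1111111111111111111111111111111111111111")
	target := common.HexToAddress("0x2222222222222222222222222222222222222222")
	value := big.NewInt(0)
	nonce := big.NewInt(42)
	message := []byte{} // empty relay payload

	msgHash := utils.ComputeMessageHash(sender, target, value, nonce, message)
	fmt.Println("message hash:", msgHash.Hex())
}
```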
|
||||
type commitBatchArgs struct {
|
||||
Version uint8
|
||||
ParentBatchHeader []byte
|
||||
Chunks [][]byte
|
||||
SkippedL1MessageBitmap []byte
|
||||
}
|
||||
|
||||
// GetBatchRangeFromCalldata finds the block range from calldata; both bounds are inclusive.
|
||||
func GetBatchRangeFromCalldata(calldata []byte) (uint64, uint64, error) {
|
||||
method := backendabi.IScrollChainABI.Methods["commitBatch"]
|
||||
values, err := method.Inputs.Unpack(calldata[4:])
|
||||
if err != nil {
|
||||
// special case: import genesis batch
|
||||
method = backendabi.IScrollChainABI.Methods["importGenesisBatch"]
|
||||
_, err2 := method.Inputs.Unpack(calldata[4:])
|
||||
if err2 == nil {
|
||||
// genesis batch
|
||||
return 0, 0, nil
|
||||
}
|
||||
// none of "commitBatch" and "importGenesisBatch" match, give up
|
||||
return 0, 0, err
|
||||
}
|
||||
args := commitBatchArgs{}
|
||||
err = method.Inputs.Copy(&args, values)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
var startBlock uint64
|
||||
var finishBlock uint64
|
||||
|
||||
// decode blocks from chunk and assume that there's no empty chunk
|
||||
// | 1 byte | 60 bytes | ... | 60 bytes |
|
||||
// | num blocks | block 1 | ... | block n |
|
||||
if len(args.Chunks) == 0 {
|
||||
return 0, 0, errors.New("invalid chunks")
|
||||
}
|
||||
chunk := args.Chunks[0]
|
||||
block := chunk[1:61] // first block in chunk
|
||||
startBlock = binary.BigEndian.Uint64(block[0:8])
|
||||
|
||||
chunk = args.Chunks[len(args.Chunks)-1]
|
||||
lastBlockIndex := int(chunk[0]) - 1
|
||||
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
|
||||
finishBlock = binary.BigEndian.Uint64(block[0:8])
|
||||
|
||||
return startBlock, finishBlock, err
|
||||
}
|
||||
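The chunk layout documented above (one num-blocks byte followed by 60-byte block contexts whose first 8 bytes are the big-endian block number) can be exercised in isolation. A self-contained sketch under those same assumptions, separate from the repository code:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const blockContextSize = 60 // assumption taken from the decoding logic above

// chunkBlockRange returns the first and last block numbers encoded in a chunk.
func chunkBlockRange(chunk []byte) (uint64, uint64, error) {
	if len(chunk) == 0 {
		return 0, 0, errors.New("empty chunk")
	}
	numBlocks := int(chunk[0])
	if numBlocks == 0 || len(chunk) < 1+numBlocks*blockContextSize {
		return 0, 0, errors.New("malformed chunk")
	}
	first := binary.BigEndian.Uint64(chunk[1:9])
	lastOffset := 1 + (numBlocks-1)*blockContextSize
	last := binary.BigEndian.Uint64(chunk[lastOffset : lastOffset+8])
	return first, last, nil
}

func main() {
	// Build a fake chunk containing two blocks, numbers 10 and 11.
	chunk := make([]byte, 1+2*blockContextSize)
	chunk[0] = 2
	binary.BigEndian.PutUint64(chunk[1:9], 10)
	binary.BigEndian.PutUint64(chunk[1+blockContextSize:1+blockContextSize+8], 11)

	first, last, err := chunkBlockRange(chunk)
	fmt.Println(first, last, err) // 10 11 <nil>
}
```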
|
||||
// GetL1BlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
|
||||
func GetL1BlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.Block, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
blocks = make([]*types.Block, end-start+1)
|
||||
concurrency = 32
|
||||
sem = make(chan struct{}, concurrency)
|
||||
)
|
||||
|
||||
for i := start; i <= end; i++ {
|
||||
sem <- struct{}{} // Acquire a slot in the semaphore
|
||||
blockNum := int64(i)
|
||||
index := i - start
|
||||
eg.Go(func() error {
|
||||
defer func() { <-sem }() // Release the slot when done
|
||||
block, err := cli.BlockByNumber(ctx, big.NewInt(blockNum))
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch block number", "number", blockNum, "error", err)
|
||||
return err
|
||||
}
|
||||
blocks[index] = block
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
log.Error("Error waiting for block fetching routines", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// GetL2BlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
|
||||
func GetL2BlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.BlockWithRowConsumption, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
blocks = make([]*types.BlockWithRowConsumption, end-start+1)
|
||||
concurrency = 32
|
||||
sem = make(chan struct{}, concurrency)
|
||||
)
|
||||
|
||||
for i := start; i <= end; i++ {
|
||||
sem <- struct{}{} // Acquire a slot in the semaphore
|
||||
blockNum := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(i))
|
||||
index := i - start
|
||||
eg.Go(func() error {
|
||||
defer func() { <-sem }() // Release the slot when done
|
||||
block, err := cli.GetBlockByNumberOrHash(ctx, blockNum)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch block number", "number", blockNum, "error", err)
|
||||
return err
|
||||
}
|
||||
blocks[index] = block
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
log.Error("Error waiting for block fetching routines", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// ConvertBigIntArrayToString converts a slice of big integers to a comma-separated string
|
||||
func ConvertBigIntArrayToString(array []*big.Int) string {
|
||||
stringArray := make([]string, len(array))
|
||||
for i, num := range array {
|
||||
stringArray[i] = num.String()
|
||||
}
|
||||
|
||||
result := strings.Join(stringArray, ", ")
|
||||
return result
|
||||
}
|
||||
|
||||
// ConvertStringToStringArray takes a string with values separated by commas and returns a slice of strings
|
||||
func ConvertStringToStringArray(s string) []string {
|
||||
if s == "" {
|
||||
return []string{}
|
||||
}
|
||||
stringParts := strings.Split(s, ",")
|
||||
for i, part := range stringParts {
|
||||
stringParts[i] = strings.TrimSpace(part)
|
||||
}
|
||||
return stringParts
|
||||
}
|
||||
|
||||
// GetSkippedQueueIndices gets the skipped queue indices
|
||||
func GetSkippedQueueIndices(startIndex uint64, skippedBitmap *big.Int) []uint64 {
|
||||
var indices []uint64
|
||||
for i := 0; i < 256; i++ {
|
||||
index := startIndex + uint64(i)
|
||||
bit := new(big.Int).Rsh(skippedBitmap, uint(i))
|
||||
if bit.Bit(0) == 0 {
|
||||
continue
|
||||
}
|
||||
indices = append(indices, index)
|
||||
}
|
||||
return indices
|
||||
}
|
||||
91
bridge-history-api/internal/utils/utils_test.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestKeccak2(t *testing.T) {
|
||||
a := common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0")
|
||||
b := common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c")
|
||||
c := Keccak2(a, b)
|
||||
assert.NotEmpty(t, c)
|
||||
assert.NotEqual(t, a, c)
|
||||
assert.NotEqual(t, b, c)
|
||||
assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
|
||||
}
|
||||
|
||||
func TestGetBatchRangeFromCalldata(t *testing.T) {
|
||||
// single chunk
|
||||
start, finish, err := GetBatchRangeFromCalldata(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003d0100000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000100000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, start, uint64(1))
|
||||
assert.Equal(t, finish, uint64(1))
|
||||
|
||||
// multiple chunk
|
||||
start, finish, err = GetBatchRangeFromCalldata(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000007900000000000000000100000000000000010000000000000001038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000004c01000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000010000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b403000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000300000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00050000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c01000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, start, uint64(10))
|
||||
assert.Equal(t, finish, uint64(20))
|
||||
|
||||
// genesis batch
|
||||
start, finish, err = GetBatchRangeFromCalldata(common.Hex2Bytes("3fdeecb200000000000000000000000000000000000000000000000000000000000000402dcb5308098d24a37fc1487a229fcedb09fa4343ede39cbad365bc925535bb09000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000c252bc9780c4d83cf11f14b8cd03c92c4d18ce07710ba836d31d12da216c8330000000000000000000000000000000000000000000000000000000000000000000000000000000"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, start, uint64(0))
|
||||
assert.Equal(t, finish, uint64(0))
|
||||
}
|
||||
|
||||
// TestConvertBigIntArrayToString tests the ConvertBigIntArrayToString function
|
||||
func TestConvertBigIntArrayToString(t *testing.T) {
|
||||
tests := []struct {
|
||||
array []*big.Int
|
||||
expected string
|
||||
}{
|
||||
{[]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, "1, 2, 3"},
|
||||
{[]*big.Int{big.NewInt(0), big.NewInt(-1)}, "0, -1"},
|
||||
{[]*big.Int{}, ""},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := ConvertBigIntArrayToString(test.array)
|
||||
assert.Equal(t, test.expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestConvertStringToStringArray tests the ConvertStringToStringArray function
|
||||
func TestConvertStringToStringArray(t *testing.T) {
|
||||
tests := []struct {
|
||||
s string
|
||||
expected []string
|
||||
}{
|
||||
{"1, 2, 3", []string{"1", "2", "3"}},
|
||||
{" 4 , 5 , 6 ", []string{"4", "5", "6"}},
|
||||
{"", []string{}},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := ConvertStringToStringArray(test.s)
|
||||
assert.Equal(t, test.expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetSkippedQueueIndices tests the GetSkippedQueueIndices function
|
||||
func TestGetSkippedQueueIndices(t *testing.T) {
|
||||
tests := []struct {
|
||||
startIndex uint64
|
||||
bitmap *big.Int
|
||||
expected []uint64
|
||||
}{
|
||||
{0, big.NewInt(0b101), []uint64{0, 2}},
|
||||
{10, big.NewInt(0b110), []uint64{11, 12}},
|
||||
{0, big.NewInt(0), nil}, // No bits set
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := GetSkippedQueueIndices(test.startIndex, test.bitmap)
|
||||
assert.Equal(t, test.expected, got)
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,10 @@
|
||||
package messageproof
|
||||
package utils
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
||||
"bridge-history-api/utils"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
)
|
||||
|
||||
// MaxHeight is the maixium possible height of withdraw trie
|
||||
// MaxHeight is the maximum possible height of withdrawal trie
|
||||
const MaxHeight = 40
|
||||
|
||||
// WithdrawTrie is an append only merkle trie
|
||||
@@ -27,7 +25,7 @@ func NewWithdrawTrie() *WithdrawTrie {
|
||||
|
||||
zeroes[0] = common.Hash{}
|
||||
for i := 1; i < MaxHeight; i++ {
|
||||
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
}
|
||||
|
||||
return &WithdrawTrie{
|
||||
@@ -40,14 +38,15 @@ func NewWithdrawTrie() *WithdrawTrie {
|
||||
|
||||
// Initialize will initialize the merkle trie with rightest leaf node
|
||||
func (w *WithdrawTrie) Initialize(currentMessageNonce uint64, msgHash common.Hash, proofBytes []byte) {
|
||||
proof := DecodeBytesToMerkleProof(proofBytes)
|
||||
branches := RecoverBranchFromProof(proof, currentMessageNonce, msgHash)
|
||||
proof := decodeBytesToMerkleProof(proofBytes)
|
||||
branches := recoverBranchFromProof(proof, currentMessageNonce, msgHash)
|
||||
w.height = len(proof)
|
||||
w.branches = branches
|
||||
w.NextMessageNonce = currentMessageNonce + 1
|
||||
}
|
||||
|
||||
// AppendMessages appends a list of new messages as leaf nodes to the rightest of the tree and returns the proofs for all messages.
|
||||
// The function correctly returns the proofs for the entire tree after all messages have been inserted, not the individual proofs after each insertion.
|
||||
func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
|
||||
length := len(hashes)
|
||||
if length == 0 {
|
||||
@@ -87,7 +86,7 @@ func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
|
||||
cache[h][maxIndex^1] = w.zeroes[h]
|
||||
}
|
||||
for i := minIndex; i <= maxIndex; i += 2 {
|
||||
cache[h+1][i>>1] = utils.Keccak2(cache[h][i], cache[h][i^1])
|
||||
cache[h+1][i>>1] = Keccak2(cache[h][i], cache[h][i^1])
|
||||
}
|
||||
minIndex >>= 1
|
||||
maxIndex >>= 1
|
||||
@@ -95,7 +94,7 @@ func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
|
||||
|
||||
// update branches using hashes one by one
|
||||
for i := 0; i < length; i++ {
|
||||
proof := UpdateBranchWithNewMessage(w.zeroes, w.branches, w.NextMessageNonce, hashes[i])
|
||||
proof := updateBranchWithNewMessage(w.zeroes, w.branches, w.NextMessageNonce, hashes[i])
|
||||
w.NextMessageNonce++
|
||||
w.height = len(proof)
|
||||
}
|
||||
@@ -109,7 +108,7 @@ func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
|
||||
merkleProof = append(merkleProof, cache[h][index^1])
|
||||
index >>= 1
|
||||
}
|
||||
proofs[i] = EncodeMerkleProofToBytes(merkleProof)
|
||||
proofs[i] = encodeMerkleProofToBytes(merkleProof)
|
||||
}
|
||||
|
||||
return proofs
|
||||
@@ -123,8 +122,8 @@ func (w *WithdrawTrie) MessageRoot() common.Hash {
|
||||
return w.branches[w.height]
|
||||
}
|
||||
|
||||
// DecodeBytesToMerkleProof transfer byte array to bytes32 array. The caller should make sure the length is matched.
|
||||
func DecodeBytesToMerkleProof(proofBytes []byte) []common.Hash {
|
||||
// decodeBytesToMerkleProof transfer byte array to bytes32 array. The caller should make sure the length is matched.
|
||||
func decodeBytesToMerkleProof(proofBytes []byte) []common.Hash {
|
||||
proof := make([]common.Hash, len(proofBytes)/32)
|
||||
for i := 0; i < len(proofBytes); i += 32 {
|
||||
proof[i/32] = common.BytesToHash(proofBytes[i : i+32])
|
||||
@@ -132,8 +131,8 @@ func DecodeBytesToMerkleProof(proofBytes []byte) []common.Hash {
|
||||
return proof
|
||||
}
|
||||
|
||||
// EncodeMerkleProofToBytes transfer byte32 array to byte array by concatenation.
|
||||
func EncodeMerkleProofToBytes(proof []common.Hash) []byte {
|
||||
// encodeMerkleProofToBytes transfer byte32 array to byte array by concatenation.
|
||||
func encodeMerkleProofToBytes(proof []common.Hash) []byte {
|
||||
var proofBytes []byte
|
||||
for i := 0; i < len(proof); i++ {
|
||||
proofBytes = append(proofBytes, proof[i][:]...)
|
||||
@@ -141,8 +140,8 @@ func EncodeMerkleProofToBytes(proof []common.Hash) []byte {
|
||||
return proofBytes
|
||||
}
|
||||
|
||||
// UpdateBranchWithNewMessage update the branches to latest with new message and return the merkle proof for the message.
|
||||
func UpdateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
|
||||
// updateBranchWithNewMessage update the branches to latest with new message and return the merkle proof for the message.
|
||||
func updateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
|
||||
root := msgHash
|
||||
var merkleProof []common.Hash
|
||||
var height uint64
|
||||
@@ -152,10 +151,10 @@ func UpdateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, in
|
||||
branches[height] = root
|
||||
merkleProof = append(merkleProof, zeroes[height])
|
||||
// it's a left child, the right child must be null
|
||||
root = utils.Keccak2(root, zeroes[height])
|
||||
root = Keccak2(root, zeroes[height])
|
||||
} else {
|
||||
// it's a right child, use previously computed hash
|
||||
root = utils.Keccak2(branches[height], root)
|
||||
root = Keccak2(branches[height], root)
|
||||
merkleProof = append(merkleProof, branches[height])
|
||||
}
|
||||
index >>= 1
|
||||
@@ -164,8 +163,8 @@ func UpdateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, in
|
||||
return merkleProof
|
||||
}
|
||||
|
||||
// RecoverBranchFromProof will recover latest branches from merkle proof and message hash
|
||||
func RecoverBranchFromProof(proof []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
|
||||
// recoverBranchFromProof will recover latest branches from merkle proof and message hash
|
||||
func recoverBranchFromProof(proof []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
|
||||
branches := make([]common.Hash, 64)
|
||||
root := msgHash
|
||||
var height uint64
|
||||
@@ -173,11 +172,11 @@ func RecoverBranchFromProof(proof []common.Hash, index uint64, msgHash common.Ha
|
||||
if index%2 == 0 {
|
||||
branches[height] = root
|
||||
// it's a left child, the right child must be null
|
||||
root = utils.Keccak2(root, proof[height])
|
||||
root = Keccak2(root, proof[height])
|
||||
} else {
|
||||
// it's a right child, use previously computed hash
|
||||
branches[height] = proof[height]
|
||||
root = utils.Keccak2(proof[height], root)
|
||||
root = Keccak2(proof[height], root)
|
||||
}
|
||||
index >>= 1
|
||||
}
|
||||
@@ -1,13 +1,11 @@
|
||||
package messageproof
|
||||
package utils
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
func TestUpdateBranchWithNewMessage(t *testing.T) {
|
||||
@@ -15,32 +13,32 @@ func TestUpdateBranchWithNewMessage(t *testing.T) {
|
||||
branches := make([]common.Hash, 64)
|
||||
zeroes[0] = common.Hash{}
|
||||
for i := 1; i < 64; i++ {
|
||||
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
}
|
||||
|
||||
UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
updateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if branches[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") {
|
||||
t.Fatalf("Invalid root, want %s, got %s", "0x0000000000000000000000000000000000000000000000000000000000000001", branches[0].Hex())
|
||||
}
|
||||
|
||||
UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
updateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
if branches[1] != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
|
||||
t.Fatalf("Invalid root, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", branches[1].Hex())
|
||||
}
|
||||
|
||||
UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
updateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
if branches[2] != common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c") {
|
||||
t.Fatalf("Invalid root, want %s, got %s", "0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c", branches[2].Hex())
|
||||
}
|
||||
|
||||
UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
updateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
if branches[2] != common.HexToHash("0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36") {
|
||||
t.Fatalf("Invalid root, want %s, got %s", "0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36", branches[2].Hex())
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeEncodeMerkleProof(t *testing.T) {
|
||||
proof := DecodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
|
||||
proof := decodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
|
||||
if len(proof) != 4 {
|
||||
t.Fatalf("proof length mismatch, want %d, got %d", 4, len(proof))
|
||||
}
|
||||
@@ -57,7 +55,7 @@ func TestDecodeEncodeMerkleProof(t *testing.T) {
|
||||
t.Fatalf("proof[3] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904", proof[0].Hex())
|
||||
}
|
||||
|
||||
bytes := EncodeMerkleProofToBytes(proof)
|
||||
bytes := encodeMerkleProofToBytes(proof)
|
||||
if common.Bytes2Hex(bytes) != "2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904" {
|
||||
t.Fatalf("wrong encoded bytes")
|
||||
}
|
||||
@@ -68,35 +66,35 @@ func TestRecoverBranchFromProof(t *testing.T) {
|
||||
branches := make([]common.Hash, 64)
|
||||
zeroes[0] = common.Hash{}
|
||||
for i := 1; i < 64; i++ {
|
||||
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
|
||||
}
|
||||
|
||||
proof := UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
tmpBranches := RecoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
proof := updateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
tmpBranches := recoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
for i := 0; i < 64; i++ {
|
||||
if tmpBranches[i] != branches[i] {
|
||||
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
|
||||
}
|
||||
}
|
||||
|
||||
proof = UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
tmpBranches = RecoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
proof = updateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
tmpBranches = recoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
|
||||
for i := 0; i < 64; i++ {
|
||||
if tmpBranches[i] != branches[i] {
|
||||
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
|
||||
}
|
||||
}
|
||||
|
||||
proof = UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
tmpBranches = RecoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
proof = updateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
tmpBranches = recoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
for i := 0; i < 64; i++ {
|
||||
if tmpBranches[i] != branches[i] {
|
||||
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
|
||||
}
|
||||
}
|
||||
|
||||
proof = UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
tmpBranches = RecoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
proof = updateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
tmpBranches = recoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
|
||||
for i := 0; i < 64; i++ {
|
||||
if tmpBranches[i] != branches[i] {
|
||||
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
|
||||
@@ -125,7 +123,7 @@ func TestWithdrawTrieOneByOne(t *testing.T) {
|
||||
})
|
||||
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(i+1))
|
||||
assert.Equal(t, expectedRoot.String(), withdrawTrie.MessageRoot().String())
|
||||
proof := DecodeBytesToMerkleProof(proofBytes[0])
|
||||
proof := decodeBytesToMerkleProof(proofBytes[0])
|
||||
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
|
||||
assert.Equal(t, expectedRoot.String(), verifiedRoot.String())
|
||||
}
|
||||
@@ -166,7 +164,7 @@ func TestWithdrawTrieMultiple(t *testing.T) {
|
||||
|
||||
for i := initial; i <= finish; i++ {
|
||||
hash := common.BigToHash(big.NewInt(int64(i + 1)))
|
||||
proof := DecodeBytesToMerkleProof(proofBytes[i-initial])
|
||||
proof := decodeBytesToMerkleProof(proofBytes[i-initial])
|
||||
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
|
||||
assert.Equal(t, expectedRoots[finish].String(), verifiedRoot.String())
|
||||
}
|
||||
@@ -178,9 +176,9 @@ func verifyMerkleProof(index uint64, leaf common.Hash, proof []common.Hash) comm
|
||||
root := leaf
|
||||
for _, h := range proof {
|
||||
if index%2 == 0 {
|
||||
root = utils.Keccak2(root, h)
|
||||
root = Keccak2(root, h)
|
||||
} else {
|
||||
root = utils.Keccak2(h, root)
|
||||
root = Keccak2(h, root)
|
||||
}
|
||||
index >>= 1
|
||||
}
|
||||
@@ -200,13 +198,13 @@ func computeMerkleRoot(hashes []common.Hash) common.Hash {
|
||||
var newHashes []common.Hash
|
||||
for i := 0; i < len(hashes); i += 2 {
|
||||
if i+1 < len(hashes) {
|
||||
newHashes = append(newHashes, utils.Keccak2(hashes[i], hashes[i+1]))
|
||||
newHashes = append(newHashes, Keccak2(hashes[i], hashes[i+1]))
|
||||
} else {
|
||||
newHashes = append(newHashes, utils.Keccak2(hashes[i], zeroHash))
|
||||
newHashes = append(newHashes, Keccak2(hashes[i], zeroHash))
|
||||
}
|
||||
}
|
||||
hashes = newHashes
|
||||
zeroHash = utils.Keccak2(zeroHash, zeroHash)
|
||||
zeroHash = Keccak2(zeroHash, zeroHash)
|
||||
}
|
||||
return hashes[0]
|
||||
}
|
||||
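Taken together, the refactor above leaves an exported WithdrawTrie API (NewWithdrawTrie, AppendMessages, MessageRoot) next to Keccak2 in the utils package. A minimal usage sketch, assuming the moved file lives in internal/utils alongside Keccak2 (illustrative only): append a few message hashes, then fold one returned proof with Keccak2 to check it against the root, much like the tests above do.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/bridge-history-api/internal/utils"
)

func main() {
	trie := utils.NewWithdrawTrie()

	hashes := []common.Hash{
		common.BigToHash(big.NewInt(1)),
		common.BigToHash(big.NewInt(2)),
		common.BigToHash(big.NewInt(3)),
	}
	proofs := trie.AppendMessages(hashes)
	root := trie.MessageRoot()

	// Re-derive the root from the first message (nonce 0) and its proof.
	index := uint64(0)
	node := hashes[0]
	proof := proofs[0]
	for i := 0; i+32 <= len(proof); i += 32 {
		sibling := common.BytesToHash(proof[i : i+32])
		if index%2 == 0 {
			node = utils.Keccak2(node, sibling)
		} else {
			node = utils.Keccak2(sibling, node)
		}
		index >>= 1
	}
	fmt.Println("proof reproduces root:", node == root)
}
```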
@@ -1,104 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// RollupBatch is the struct for rollup_batch table
|
||||
type RollupBatch struct {
|
||||
db *gorm.DB `gorm:"column:-"`
|
||||
|
||||
ID uint64 `json:"id" gorm:"column:id"`
|
||||
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
|
||||
BatchHash string `json:"batch_hash" gorm:"column:batch_hash"`
|
||||
CommitHeight uint64 `json:"commit_height" gorm:"column:commit_height"`
|
||||
StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
|
||||
EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
|
||||
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root;default:NULL"`
|
||||
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
|
||||
}
|
||||
|
||||
// NewRollupBatch create an RollupBatch instance
|
||||
func NewRollupBatch(db *gorm.DB) *RollupBatch {
|
||||
return &RollupBatch{db: db}
|
||||
}
|
||||
|
||||
// TableName returns the table name for the Batch model.
|
||||
func (*RollupBatch) TableName() string {
|
||||
return "rollup_batch"
|
||||
}
|
||||
|
||||
// GetLatestRollupBatchProcessedHeight return latest processed height from rollup_batch table
|
||||
func (r *RollupBatch) GetLatestRollupBatchProcessedHeight(ctx context.Context) (uint64, error) {
|
||||
var result RollupBatch
|
||||
err := r.db.WithContext(ctx).Unscoped().Select("commit_height").Order("id desc").First(&result).Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("RollupBatch.GetLatestRollupBatchProcessedHeight error: %w", err)
|
||||
}
|
||||
return result.CommitHeight, nil
|
||||
}
|
||||
|
||||
// GetLatestRollupBatch return the latest rollup batch in db
|
||||
func (r *RollupBatch) GetLatestRollupBatch(ctx context.Context) (*RollupBatch, error) {
|
||||
var result RollupBatch
|
||||
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_hash is not NULL").Order("batch_index desc").First(&result).Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("RollupBatch.GetLatestRollupBatch error: %w", err)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetRollupBatchByIndex return the rollup batch by index
|
||||
func (r *RollupBatch) GetRollupBatchByIndex(ctx context.Context, index uint64) (*RollupBatch, error) {
|
||||
var result RollupBatch
|
||||
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index = ?", index).First(&result).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("RollupBatch.GetRollupBatchByIndex error: %w", err)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// InsertRollupBatch batch insert rollup batch into db and return the transaction
|
||||
func (r *RollupBatch) InsertRollupBatch(ctx context.Context, batches []*RollupBatch, dbTx ...*gorm.DB) error {
|
||||
if len(batches) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := r.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
err := db.WithContext(ctx).Model(&RollupBatch{}).Create(&batches).Error
|
||||
if err != nil {
|
||||
batchIndexes := make([]uint64, 0, len(batches))
|
||||
heights := make([]uint64, 0, len(batches))
|
||||
for _, batch := range batches {
|
||||
batchIndexes = append(batchIndexes, batch.BatchIndex)
|
||||
heights = append(heights, batch.CommitHeight)
|
||||
}
|
||||
log.Error("failed to insert rollup batch", "batchIndexes", batchIndexes, "heights", heights)
|
||||
return fmt.Errorf("RollupBatch.InsertRollupBatch error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateRollupBatchWithdrawRoot updates the withdraw_root column in rollup_batch table
|
||||
func (r *RollupBatch) UpdateRollupBatchWithdrawRoot(ctx context.Context, batchIndex uint64, withdrawRoot string) error {
|
||||
err := r.db.WithContext(ctx).Model(&RollupBatch{}).Where("batch_index = ?", batchIndex).Update("withdraw_root", withdrawRoot).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("RollupBatch.UpdateRuollupBatch error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,370 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// AssetType can be ETH/ERC20/ERC1155/ERC721
|
||||
type AssetType int
|
||||
|
||||
// MsgType can be layer1/layer2 msg
|
||||
type MsgType int
|
||||
|
||||
func (a AssetType) String() string {
|
||||
switch a {
|
||||
case ETH:
|
||||
return "ETH"
|
||||
case ERC20:
|
||||
return "ERC20"
|
||||
case ERC1155:
|
||||
return "ERC1155"
|
||||
case ERC721:
|
||||
return "ERC721"
|
||||
}
|
||||
return "Unknown Asset Type"
|
||||
}
|
||||
|
||||
const (
|
||||
// ETH = 0
|
||||
ETH AssetType = iota
|
||||
// ERC20 = 1
|
||||
ERC20
|
||||
// ERC721 = 2
|
||||
ERC721
|
||||
// ERC1155 = 3
|
||||
ERC1155
|
||||
)
|
||||
|
||||
const (
|
||||
// UnknownMsg = 0
|
||||
UnknownMsg MsgType = iota
|
||||
// Layer1Msg = 1
|
||||
Layer1Msg
|
||||
// Layer2Msg = 2
|
||||
Layer2Msg
|
||||
)
|
||||
|
||||
// CrossMsg represents a cross message from layer 1 to layer 2
|
||||
type CrossMsg struct {
|
||||
db *gorm.DB `gorm:"column:-"`
|
||||
|
||||
ID uint64 `json:"id" gorm:"column:id"`
|
||||
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
|
||||
Height uint64 `json:"height" gorm:"column:height"`
|
||||
Sender string `json:"sender" gorm:"column:sender"`
|
||||
Target string `json:"target" gorm:"column:target"`
|
||||
Amount string `json:"amount" gorm:"column:amount"`
|
||||
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:''"`
|
||||
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:''"`
|
||||
Layer1Token string `json:"layer1_token" gorm:"column:layer1_token;default:''"`
|
||||
Layer2Token string `json:"layer2_token" gorm:"column:layer2_token;default:''"`
|
||||
TokenIDs string `json:"token_ids" gorm:"column:token_ids;default:''"`
|
||||
TokenAmounts string `json:"token_amounts" gorm:"column:token_amounts;default:''"`
|
||||
Asset int `json:"asset" gorm:"column:asset"`
|
||||
MsgType int `json:"msg_type" gorm:"column:msg_type"`
|
||||
Timestamp *time.Time `json:"timestamp" gorm:"column:block_timestamp;default;NULL"`
|
||||
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
|
||||
}
|
||||
|
||||
// TableName returns the table name for the CrossMsg model.
|
||||
func (*CrossMsg) TableName() string {
|
||||
return "cross_message"
|
||||
}
|
||||
|
||||
// NewCrossMsg returns a new instance of CrossMsg.
|
||||
func NewCrossMsg(db *gorm.DB) *CrossMsg {
|
||||
return &CrossMsg{db: db}
|
||||
}
|
||||
|
||||
// L1 Cross Msgs Operations
|
||||
|
||||
// GetL1CrossMsgByHash returns layer1 cross message by given hash
|
||||
func (c *CrossMsg) GetL1CrossMsgByHash(ctx context.Context, l1Hash common.Hash) (*CrossMsg, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer1_hash = ? AND msg_type = ?", l1Hash.String(), Layer1Msg).First(&result).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("CrossMsg.GetL1CrossMsgByHash error: %w", err)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetLatestL1ProcessedHeight returns the latest processed height of layer1 cross messages
|
||||
func (c *CrossMsg) GetLatestL1ProcessedHeight(ctx context.Context) (uint64, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("msg_type = ?", Layer1Msg).
|
||||
Select("height").
|
||||
Order("id DESC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetLatestL1ProcessedHeight error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// GetL1EarliestNoBlockTimestampHeight returns the earliest layer1 cross message height which has no block timestamp
|
||||
func (c *CrossMsg) GetL1EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("block_timestamp IS NULL AND msg_type = ?", Layer1Msg).
|
||||
Select("height").
|
||||
Order("height ASC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetL1EarliestNoBlockTimestampHeight error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// InsertL1CrossMsg batch insert layer1 cross messages into db
|
||||
func (c *CrossMsg) InsertL1CrossMsg(ctx context.Context, messages []*CrossMsg, dbTx ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&CrossMsg{}).Create(&messages).Error
|
||||
if err != nil {
|
||||
l1hashes := make([]string, 0, len(messages))
|
||||
heights := make([]uint64, 0, len(messages))
|
||||
for _, msg := range messages {
|
||||
l1hashes = append(l1hashes, msg.Layer1Hash)
|
||||
heights = append(heights, msg.Height)
|
||||
}
|
||||
log.Error("failed to insert l1 cross messages", "l1hashes", l1hashes, "heights", heights, "err", err)
|
||||
return fmt.Errorf("CrossMsg.InsertL1CrossMsg error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL1CrossMsgHash update l1 cross msg hash in db, no need to check msg_type since layer1_hash wont be empty if its layer1 msg
|
||||
func (c *CrossMsg) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash, dbTx ...*gorm.DB) error {
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := c.db.Model(&CrossMsg{}).Where("layer1_hash = ?", l1Hash.Hex()).Update("msg_hash", msgHash.Hex()).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.UpdateL1CrossMsgHash error: %w", err)
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// UpdateL1BlockTimestamp update layer1 block timestamp
|
||||
func (c *CrossMsg) UpdateL1BlockTimestamp(ctx context.Context, height uint64, timestamp time.Time) error {
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("height = ? AND msg_type = ?", height, Layer1Msg).
|
||||
Update("block_timestamp", timestamp).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.UpdateL1BlockTimestamp error: %w", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteL1CrossMsgAfterHeight soft delete layer1 cross messages after given height
|
||||
func (c *CrossMsg) DeleteL1CrossMsgAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Delete(&CrossMsg{}, "height > ? AND msg_type = ?", height, Layer1Msg).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.DeleteL1CrossMsgAfterHeight error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// L2 Cross Msgs Operations
|
||||
|
||||
// GetL2CrossMsgByHash returns layer2 cross message by given hash
|
||||
func (c *CrossMsg) GetL2CrossMsgByHash(ctx context.Context, l2Hash common.Hash) (*CrossMsg, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer2_hash = ? AND msg_type = ?", l2Hash.String(), Layer2Msg).First(&result).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("CrossMsg.GetL2CrossMsgByHash error: %w", err)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetLatestL2ProcessedHeight returns the latest processed height of layer2 cross messages
|
||||
func (c *CrossMsg) GetLatestL2ProcessedHeight(ctx context.Context) (uint64, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Select("height").
|
||||
Where("msg_type = ?", Layer2Msg).
|
||||
Order("id DESC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetLatestL2ProcessedHeight error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// GetL2CrossMsgByMsgHashList returns layer2 cross messages under given msg hashes
|
||||
func (c *CrossMsg) GetL2CrossMsgByMsgHashList(ctx context.Context, msgHashList []string) ([]*CrossMsg, error) {
|
||||
var results []*CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("msg_hash IN (?) AND msg_type = ?", msgHashList, Layer2Msg).
|
||||
Find(&results).
|
||||
Error
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("CrossMsg.GetL2CrossMsgByMsgHashList error: %w", err)
|
||||
}
|
||||
if len(results) == 0 {
|
||||
log.Debug("no CrossMsg under given msg hashes", "msg hash list", msgHashList)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetL2EarliestNoBlockTimestampHeight returns the earliest layer2 cross message height which has no block timestamp
|
||||
func (c *CrossMsg) GetL2EarliestNoBlockTimestampHeight(ctx context.Context) (uint64, error) {
|
||||
var result CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("block_timestamp IS NULL AND msg_type = ?", Layer2Msg).
|
||||
Select("height").
|
||||
Order("height ASC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetL2EarliestNoBlockTimestampHeight error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// InsertL2CrossMsg batch insert layer2 cross messages
|
||||
func (c *CrossMsg) InsertL2CrossMsg(ctx context.Context, messages []*CrossMsg, dbTx ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&CrossMsg{}).Create(&messages).Error
|
||||
if err != nil {
|
||||
l2hashes := make([]string, 0, len(messages))
|
||||
heights := make([]uint64, 0, len(messages))
|
||||
for _, msg := range messages {
|
||||
l2hashes = append(l2hashes, msg.Layer2Hash)
|
||||
heights = append(heights, msg.Height)
|
||||
}
|
||||
log.Error("failed to insert l2 cross messages", "l2hashes", l2hashes, "heights", heights, "err", err)
|
||||
return fmt.Errorf("CrossMsg.InsertL2CrossMsg error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL2CrossMsgHash update layer2 cross message hash
|
||||
func (c *CrossMsg) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash, dbTx ...*gorm.DB) error {
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&CrossMsg{}).
|
||||
Where("layer2_hash = ?", l2Hash.String()).
|
||||
Update("msg_hash", msgHash.String()).
|
||||
Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.UpdateL2CrossMsgHash error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL2BlockTimestamp update layer2 cross message block timestamp
|
||||
func (c *CrossMsg) UpdateL2BlockTimestamp(ctx context.Context, height uint64, timestamp time.Time) error {
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("height = ? AND msg_type = ?", height, Layer2Msg).
|
||||
Update("block_timestamp", timestamp).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.UpdateL2BlockTimestamp error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteL2CrossMsgFromHeight delete layer2 cross messages from given height
|
||||
func (c *CrossMsg) DeleteL2CrossMsgFromHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
|
||||
db := c.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&CrossMsg{}).Delete("height > ? AND msg_type = ?", height, Layer2Msg).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("CrossMsg.DeleteL2CrossMsgFromHeight error: %w", err)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// General Operations
|
||||
|
||||
// GetTotalCrossMsgCountByAddress get total cross msg count by address
|
||||
func (c *CrossMsg) GetTotalCrossMsgCountByAddress(ctx context.Context, sender string) (uint64, error) {
|
||||
var count int64
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("sender = ?", sender).
|
||||
Count(&count).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("CrossMsg.GetTotalCrossMsgCountByAddress error: %w", err)
|
||||
|
||||
}
|
||||
return uint64(count), nil
|
||||
}
|
||||
|
||||
// GetCrossMsgsByAddressWithOffset get cross msgs by address with offset
|
||||
func (c *CrossMsg) GetCrossMsgsByAddressWithOffset(ctx context.Context, sender string, offset int, limit int) ([]CrossMsg, error) {
|
||||
var messages []CrossMsg
|
||||
err := c.db.WithContext(ctx).Model(&CrossMsg{}).
|
||||
Where("sender = ?", sender).
|
||||
Order("block_timestamp DESC NULLS FIRST, id DESC").
|
||||
Limit(limit).
|
||||
Offset(offset).
|
||||
Find(&messages).
|
||||
Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("CrossMsg.GetCrossMsgsByAddressWithOffset error: %w", err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
@@ -1,219 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// L2SentMsg defines the struct for l2_sent_msg table record
|
||||
type L2SentMsg struct {
|
||||
db *gorm.DB `gorm:"column:-"`
|
||||
|
||||
ID uint64 `json:"id" gorm:"column:id"`
|
||||
OriginalSender string `json:"original_sender" gorm:"column:original_sender;default:''"`
|
||||
TxHash string `json:"tx_hash" gorm:"column:tx_hash"`
|
||||
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
|
||||
Sender string `json:"sender" gorm:"column:sender"`
|
||||
Target string `json:"target" gorm:"column:target"`
|
||||
Value string `json:"value" gorm:"column:value"`
|
||||
Height uint64 `json:"height" gorm:"column:height"`
|
||||
Nonce uint64 `json:"nonce" gorm:"column:nonce"`
|
||||
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index;default:0"`
|
||||
MsgProof string `json:"msg_proof" gorm:"column:msg_proof;default:''"`
|
||||
MsgData string `json:"msg_data" gorm:"column:msg_data;default:''"`
|
||||
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
|
||||
}
|
||||
|
||||
// NewL2SentMsg create an NewL2SentMsg instance
|
||||
func NewL2SentMsg(db *gorm.DB) *L2SentMsg {
|
||||
return &L2SentMsg{db: db}
|
||||
}
|
||||
|
||||
// TableName returns the table name for the L2SentMsg model.
|
||||
func (*L2SentMsg) TableName() string {
|
||||
return "l2_sent_msg"
|
||||
}
|
||||
|
||||
// GetL2SentMsgByHash get l2 sent msg by hash
|
||||
func (l *L2SentMsg) GetL2SentMsgByHash(ctx context.Context, msgHash string) (*L2SentMsg, error) {
|
||||
var result L2SentMsg
|
||||
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
|
||||
Where("msg_hash = ?", msgHash).
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgByHash error: %w", err)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetLatestSentMsgHeightOnL2 get latest sent msg height on l2
|
||||
func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, error) {
|
||||
var result L2SentMsg
|
||||
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
|
||||
Select("height").
|
||||
Order("nonce DESC").
|
||||
First(&result).Error
|
||||
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("L2SentMsg.GetLatestSentMsgHeightOnL2 error: %w", err)
|
||||
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// GetClaimableL2SentMsgByAddressWithOffset get claimable l2 sent msg by address with offset
|
||||
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressWithOffset(ctx context.Context, address string, offset int, limit int) ([]*L2SentMsg, error) {
|
||||
var results []*L2SentMsg
|
||||
err := l.db.WithContext(ctx).Raw(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='' ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset).
|
||||
Scan(&results).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressWithOffset error: %w", err)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetClaimableL2SentMsgByAddressTotalNum get claimable l2 sent msg by address total num
|
||||
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressTotalNum(ctx context.Context, address string) (uint64, error) {
|
||||
var count uint64
|
||||
err := l.db.WithContext(ctx).Raw(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='';`, address).
|
||||
Scan(&count).Error
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressTotalNum error: %w", err)
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// GetLatestL2SentMsgBatchIndex get latest l2 sent msg batch index
|
||||
func (l *L2SentMsg) GetLatestL2SentMsgBatchIndex(ctx context.Context) (int64, error) {
|
||||
var result L2SentMsg
|
||||
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
|
||||
Where("batch_index != 0").
|
||||
Order("batch_index DESC").
|
||||
Select("batch_index").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return -1, nil
|
||||
}
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgBatchIndex error: %w", err)
|
||||
}
|
||||
// Watch for overflow, though it is unlikely to happen here
|
||||
return int64(result.BatchIndex), nil
|
||||
}
|
||||
|
||||
// GetL2SentMsgMsgHashByHeightRange get l2 sent msg msg hash by height range
|
||||
func (l *L2SentMsg) GetL2SentMsgMsgHashByHeightRange(ctx context.Context, startHeight, endHeight uint64) ([]*L2SentMsg, error) {
|
||||
var results []*L2SentMsg
|
||||
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
|
||||
Where("height >= ? AND height <= ?", startHeight, endHeight).
|
||||
Order("nonce ASC").
|
||||
Find(&results).
|
||||
Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("L2SentMsg.GetL2SentMsgMsgHashByHeightRange error: %w", err)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetL2SentMessageByNonce get l2 sent message by nonce
|
||||
func (l *L2SentMsg) GetL2SentMessageByNonce(ctx context.Context, nonce uint64) (*L2SentMsg, error) {
|
||||
var result L2SentMsg
|
||||
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
|
||||
Where("nonce = ?", nonce).
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("L2SentMsg.GetL2SentMessageByNonce error: %w", err)
|
||||
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetLatestL2SentMsgLEHeight get latest l2 sent msg less than or equal to end block number
|
||||
func (l *L2SentMsg) GetLatestL2SentMsgLEHeight(ctx context.Context, endBlockNumber uint64) (*L2SentMsg, error) {
|
||||
var result L2SentMsg
|
||||
err := l.db.WithContext(ctx).Model(&L2SentMsg{}).
|
||||
Where("height <= ?", endBlockNumber).
|
||||
Order("nonce DESC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgLEHeight error: %w", err)
|
||||
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// InsertL2SentMsg batch insert l2 sent msg
|
||||
func (l *L2SentMsg) InsertL2SentMsg(ctx context.Context, messages []*L2SentMsg, dbTx ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := l.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&L2SentMsg{}).Create(&messages).Error
|
||||
if err != nil {
|
||||
l2hashes := make([]string, 0, len(messages))
|
||||
heights := make([]uint64, 0, len(messages))
|
||||
for _, msg := range messages {
|
||||
l2hashes = append(l2hashes, msg.TxHash)
|
||||
heights = append(heights, msg.Height)
|
||||
}
|
||||
log.Error("failed to insert l2 sent messages", "l2hashes", l2hashes, "heights", heights, "err", err)
|
||||
return fmt.Errorf("L2SentMsg.InsertL2SentMsg error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL2MessageProof update l2 message proof in db tx
|
||||
func (l *L2SentMsg) UpdateL2MessageProof(ctx context.Context, msgHash string, proof string, batchIndex uint64, dbTx ...*gorm.DB) error {
|
||||
db := l.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&L2SentMsg{}).
|
||||
Where("msg_hash = ?", msgHash).
|
||||
Updates(map[string]interface{}{
|
||||
"msg_proof": proof,
|
||||
"batch_index": batchIndex,
|
||||
}).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("L2SentMsg.UpdateL2MessageProof error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteL2SentMsgAfterHeight delete l2 sent msg after height
|
||||
func (l *L2SentMsg) DeleteL2SentMsgAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
|
||||
db := l.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
err := db.WithContext(ctx).Model(&L2SentMsg{}).Delete("height > ?", height).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("L2SentMsg.DeleteL2SentMsgAfterHeight error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
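The write helpers in the deleted l2_sent_msg.go above (InsertL2SentMsg, UpdateL2MessageProof, DeleteL2SentMsgAfterHeight) accept an optional trailing *gorm.DB so that callers can group several writes into one database transaction. A hedged sketch of that pattern (saveBatchResults is a hypothetical caller, not code from this repository):

// Hypothetical: insert new messages and update a proof atomically.
func saveBatchResults(ctx context.Context, db *gorm.DB, l2SentMsgOrm *L2SentMsg, msgs []*L2SentMsg, msgHash, proof string, batchIndex uint64) error {
    return db.Transaction(func(tx *gorm.DB) error {
        if err := l2SentMsgOrm.InsertL2SentMsg(ctx, msgs, tx); err != nil {
            return err
        }
        return l2SentMsgOrm.UpdateL2MessageProof(ctx, msgHash, proof, batchIndex, tx)
    })
}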
@@ -1,57 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table cross_message
(
    id              BIGSERIAL PRIMARY KEY,
    msg_hash        VARCHAR NOT NULL,
    height          BIGINT NOT NULL,
    sender          VARCHAR NOT NULL,
    target          VARCHAR NOT NULL,
    amount          VARCHAR NOT NULL,
    layer1_hash     VARCHAR NOT NULL DEFAULT '',
    layer2_hash     VARCHAR NOT NULL DEFAULT '',
    layer1_token    VARCHAR NOT NULL DEFAULT '',
    layer2_token    VARCHAR NOT NULL DEFAULT '',
    asset           SMALLINT NOT NULL,
    msg_type        SMALLINT NOT NULL,
    token_ids       TEXT NOT NULL DEFAULT '',
    token_amounts   TEXT NOT NULL DEFAULT '',
    block_timestamp TIMESTAMP(0) DEFAULT NULL,
    created_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at      TIMESTAMP(0) DEFAULT NULL
);

create unique index uk_msg_hash_msg_type
    on cross_message (msg_hash, msg_type) where deleted_at IS NULL;

comment
    on column cross_message.asset is 'ETH, ERC20, ERC721, ERC1155';

comment
    on column cross_message.msg_type is 'unknown, l1msg, l2msg';

CREATE INDEX idx_l1_msg_index ON cross_message (layer1_hash, deleted_at);

CREATE INDEX idx_l2_msg_index ON cross_message (layer2_hash, deleted_at);

CREATE INDEX idx_height_msg_type_index ON cross_message (height, msg_type, deleted_at);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON cross_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists cross_message;
-- +goose StatementEnd
@@ -1,41 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table relayed_msg
(
    id          BIGSERIAL PRIMARY KEY,
    msg_hash    VARCHAR NOT NULL,
    height      BIGINT NOT NULL,
    layer1_hash VARCHAR NOT NULL DEFAULT '',
    layer2_hash VARCHAR NOT NULL DEFAULT '',
    created_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at  TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at  TIMESTAMP(0) DEFAULT NULL
);

create unique index uk_msg_hash_l1_hash_l2_hash
    on relayed_msg (msg_hash, layer1_hash, layer2_hash) where deleted_at IS NULL;

CREATE INDEX idx_l1_msg_relayed_msg ON relayed_msg (layer1_hash, deleted_at);

CREATE INDEX idx_l2_msg_relayed_msg ON relayed_msg (layer2_hash, deleted_at);

CREATE INDEX idx_msg_hash_deleted_at_relayed_msg on relayed_msg (msg_hash, deleted_at);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists relayed_msg;
-- +goose StatementEnd
@@ -1,47 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table l2_sent_msg
(
    id              BIGSERIAL PRIMARY KEY,
    original_sender VARCHAR NOT NULL DEFAULT '',
    tx_hash         VARCHAR NOT NULL,
    sender          VARCHAR NOT NULL,
    target          VARCHAR NOT NULL,
    value           VARCHAR NOT NULL,
    msg_hash        VARCHAR NOT NULL,
    height          BIGINT NOT NULL,
    nonce           BIGINT NOT NULL,
    batch_index     BIGINT NOT NULL DEFAULT 0,
    msg_proof       TEXT NOT NULL DEFAULT '',
    msg_data        TEXT NOT NULL DEFAULT '',
    created_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at      TIMESTAMP(0) DEFAULT NULL
);

create unique index uk_msg_hash
    on l2_sent_msg (msg_hash) where deleted_at IS NULL;

create unique index uk_nonce
    on l2_sent_msg (nonce) where deleted_at IS NULL;

CREATE INDEX idx_msg_hash_deleted_at_l2_sent_msg on l2_sent_msg (msg_hash, deleted_at);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_sent_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l2_sent_msg;
-- +goose StatementEnd
@@ -1,40 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table rollup_batch
(
    id                 BIGSERIAL PRIMARY KEY,
    batch_index        BIGINT NOT NULL,
    commit_height      BIGINT NOT NULL,
    start_block_number BIGINT NOT NULL,
    end_block_number   BIGINT NOT NULL,
    batch_hash         VARCHAR NOT NULL,
    withdraw_root      TEXT DEFAULT NULL,
    created_at         TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at         TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at         TIMESTAMP(0) DEFAULT NULL
);

create unique index uk_batch_index
    on rollup_batch (batch_index) where deleted_at IS NULL;

create unique index uk_batch_hash
    on rollup_batch (batch_hash) where deleted_at IS NULL;

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON rollup_batch FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists rollup_batch;
-- +goose StatementEnd
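All four migrations share the same conventions: soft deletes via deleted_at, partial unique indexes scoped to deleted_at IS NULL, and an update_timestamp trigger that refreshes updated_at on every UPDATE. The files are goose migrations; a minimal sketch of applying them from Go, assuming github.com/pressly/goose/v3 plus a placeholder DSN and migrations directory:

package main

import (
    "database/sql"
    "log"

    _ "github.com/lib/pq"
    "github.com/pressly/goose/v3"
)

func main() {
    // Placeholder DSN; the real value would come from config.
    db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/bridge_history?sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    if err := goose.SetDialect("postgres"); err != nil {
        log.Fatal(err)
    }
    // Assumed location of the *.sql files shown above.
    if err := goose.Up(db, "./db/migrate/migrations"); err != nil {
        log.Fatal(err)
    }
}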
@@ -1,142 +0,0 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// RelayedMsg is the struct for relayed_msg table
|
||||
type RelayedMsg struct {
|
||||
db *gorm.DB `gorm:"column:-"`
|
||||
|
||||
ID uint64 `json:"id" gorm:"column:id"`
|
||||
MsgHash string `json:"msg_hash" gorm:"column:msg_hash"`
|
||||
Height uint64 `json:"height" gorm:"column:height"`
|
||||
Layer1Hash string `json:"layer1_hash" gorm:"column:layer1_hash;default:''"`
|
||||
Layer2Hash string `json:"layer2_hash" gorm:"column:layer2_hash;default:''"`
|
||||
CreatedAt *time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt *time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
|
||||
}
|
||||
|
||||
// NewRelayedMsg creates a new RelayedMsg instance
|
||||
func NewRelayedMsg(db *gorm.DB) *RelayedMsg {
|
||||
return &RelayedMsg{db: db}
|
||||
}
|
||||
|
||||
// TableName returns the table name for the RelayedMsg model.
|
||||
func (*RelayedMsg) TableName() string {
|
||||
return "relayed_msg"
|
||||
}
|
||||
|
||||
// GetRelayedMsgByHash get relayed msg by hash
|
||||
func (r *RelayedMsg) GetRelayedMsgByHash(ctx context.Context, msgHash string) (*RelayedMsg, error) {
|
||||
var result RelayedMsg
|
||||
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
|
||||
Where("msg_hash = ?", msgHash).
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("RelayedMsg.GetRelayedMsgByHash error: %w", err)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetLatestRelayedHeightOnL1 get latest relayed height on l1
|
||||
func (r *RelayedMsg) GetLatestRelayedHeightOnL1(ctx context.Context) (uint64, error) {
|
||||
var result RelayedMsg
|
||||
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
|
||||
Select("height").
|
||||
Where("layer1_hash != ''").
|
||||
Order("height DESC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("RelayedMsg.GetLatestRelayedHeightOnL1 error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// GetLatestRelayedHeightOnL2 get latest relayed height on l2
|
||||
func (r *RelayedMsg) GetLatestRelayedHeightOnL2(ctx context.Context) (uint64, error) {
|
||||
var result RelayedMsg
|
||||
err := r.db.WithContext(ctx).Model(&RelayedMsg{}).
|
||||
Select("height").
|
||||
Where("layer2_hash != ''").
|
||||
Order("height DESC").
|
||||
First(&result).
|
||||
Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("RelayedMsg.GetLatestRelayedHeightOnL2 error: %w", err)
|
||||
}
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// InsertRelayedMsg batch inserts relayed messages into the db
|
||||
func (r *RelayedMsg) InsertRelayedMsg(ctx context.Context, messages []*RelayedMsg, dbTx ...*gorm.DB) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := r.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&RelayedMsg{}).Create(&messages).Error
|
||||
if err != nil {
|
||||
l2hashes := make([]string, 0, len(messages))
|
||||
l1hashes := make([]string, 0, len(messages))
|
||||
heights := make([]uint64, 0, len(messages))
|
||||
for _, msg := range messages {
|
||||
l2hashes = append(l2hashes, msg.Layer2Hash)
|
||||
l1hashes = append(l1hashes, msg.Layer1Hash)
|
||||
heights = append(heights, msg.Height)
|
||||
}
|
||||
log.Error("failed to insert l2 sent messages", "l2hashes", l2hashes, "l1hashes", l1hashes, "heights", heights, "err", err)
|
||||
return fmt.Errorf("RelayedMsg.InsertRelayedMsg error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteL1RelayedHashAfterHeight delete l1 relayed hash after height
|
||||
func (r *RelayedMsg) DeleteL1RelayedHashAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
|
||||
db := r.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&RelayedMsg{}).
|
||||
Delete("height > ? AND layer1_hash != ''", height).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("RelayedMsg.DeleteL1RelayedHashAfterHeight error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteL2RelayedHashAfterHeight delete l2 relayed hash after heights
|
||||
func (r *RelayedMsg) DeleteL2RelayedHashAfterHeight(ctx context.Context, height uint64, dbTx ...*gorm.DB) error {
|
||||
db := r.db
|
||||
if len(dbTx) > 0 && dbTx[0] != nil {
|
||||
db = dbTx[0]
|
||||
}
|
||||
db.WithContext(ctx)
|
||||
err := db.Model(&RelayedMsg{}).
|
||||
Delete("height > ? AND layer2_hash != ''", height).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("RelayedMsg.DeleteL2RelayedHashAfterHeight error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
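One detail worth flagging in the deleted ORM code above: gorm's WithContext returns a new session rather than mutating the receiver, so a bare db.WithContext(ctx) statement on its own line (as written in several of the insert/delete helpers) most likely does not attach the context at all. A sketch of the form that does, under that assumption:

// Keep the session returned by WithContext so the context is actually attached.
db = db.WithContext(ctx)
if err := db.Model(&RelayedMsg{}).Create(&messages).Error; err != nil {
    return fmt.Errorf("RelayedMsg.InsertRelayedMsg error: %w", err)
}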
File diff suppressed because one or more lines are too long
@@ -1,115 +0,0 @@
package utils

import (
    "context"
    "database/sql"
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "gorm.io/driver/postgres"
    "gorm.io/gorm"
    "gorm.io/gorm/logger"
    "gorm.io/gorm/utils"

    "bridge-history-api/config"
)

type gormLogger struct {
    gethLogger log.Logger
}

func (g *gormLogger) LogMode(level logger.LogLevel) logger.Interface {
    return g
}

func (g *gormLogger) Info(_ context.Context, msg string, data ...interface{}) {
    infoMsg := fmt.Sprintf(msg, data...)
    g.gethLogger.Info("gorm", "info message", infoMsg)
}

func (g *gormLogger) Warn(_ context.Context, msg string, data ...interface{}) {
    warnMsg := fmt.Sprintf(msg, data...)
    g.gethLogger.Warn("gorm", "warn message", warnMsg)
}

func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
    errMsg := fmt.Sprintf(msg, data...)
    g.gethLogger.Error("gorm", "err message", errMsg)
}

func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
    elapsed := time.Since(begin)
    sql, rowsAffected := fc()
    g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}

// InitDB initializes the db handler
func InitDB(config *config.DBConfig) (*gorm.DB, error) {
    tmpGormLogger := gormLogger{
        gethLogger: log.Root(),
    }

    db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
        Logger: &tmpGormLogger,
        NowFunc: func() time.Time {
            // Use UTC for gorm-generated timestamps. Without this, rows are stamped in the local
            // timezone (e.g. 2023-07-18 18:24:00 CST+8) but stored by Postgres as
            // 2023-07-18 18:24:00 UTC+0, i.e. with the wrong timezone. A MySQL DSN
            // (user:pass@tcp(127.0.0.1:3306)/dbname?charset=utf8mb4&parseTime=True&loc=Local) can set the
            // timezone via loc=Local, but the Postgres DSN has no loc option, so set it through gorm's NowFunc.
            t, err := nowUTC()
            if err != nil {
                log.Error("Can not get UTC time: ", "err", err)
            }
            return t
        },
    })
    if err != nil {
        return nil, err
    }

    sqlDB, pingErr := Ping(db)
    if pingErr != nil {
        return nil, pingErr
    }

    sqlDB.SetMaxOpenConns(config.MaxOpenNum)
    sqlDB.SetMaxIdleConns(config.MaxIdleNum)

    return db, nil
}

// Ping checks the db status
func Ping(db *gorm.DB) (*sql.DB, error) {
    sqlDB, err := db.DB()
    if err != nil {
        return nil, err
    }

    if err = sqlDB.Ping(); err != nil {
        return nil, err
    }
    return sqlDB, nil
}

// CloseDB closes the db handler. Note that the handler should only be closed when the program exits.
func CloseDB(db *gorm.DB) error {
    sqlDB, err := db.DB()
    if err != nil {
        return err
    }
    if err := sqlDB.Close(); err != nil {
        return err
    }
    return nil
}

// nowUTC returns the current time in UTC
func nowUTC() (time.Time, error) {
    utc, err := time.LoadLocation("")
    if err != nil {
        return time.Time{}, err
    }
    return time.Now().In(utc), nil
}
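A minimal startup sketch for the helpers above (the DSN and pool sizes are placeholders; the field names follow the config.DBConfig usage inside InitDB; assumes it sits inside main with bridge-history-api/config, bridge-history-api/utils and github.com/ethereum/go-ethereum/log imported):

// Hypothetical startup snippet.
cfg := &config.DBConfig{
    DSN:        "postgres://user:pass@localhost:5432/bridge_history?sslmode=disable", // placeholder
    MaxOpenNum: 50,
    MaxIdleNum: 5,
}
db, err := utils.InitDB(cfg)
if err != nil {
    log.Crit("failed to init db", "err", err)
}
defer func() {
    if err := utils.CloseDB(db); err != nil {
        log.Error("failed to close db", "err", err)
    }
}()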
@@ -1,69 +0,0 @@
package utils

import (
    "github.com/urfave/cli/v2"
)

var (
    // CommonFlags is used for app common flags in different modules
    CommonFlags = []cli.Flag{
        &ConfigFileFlag,
        &VerbosityFlag,
        &LogFileFlag,
        &LogJSONFormat,
        &LogDebugFlag,
        &MetricsEnabled,
        &MetricsAddr,
        &MetricsPort,
    }
    // ConfigFileFlag loads a JSON config file.
    ConfigFileFlag = cli.StringFlag{
        Name:  "config",
        Usage: "JSON configuration file",
        Value: "./config.json",
    }
    // VerbosityFlag sets the log level.
    VerbosityFlag = cli.IntFlag{
        Name:  "verbosity",
        Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
        Value: 3,
    }
    // LogFileFlag decides where the logger output is sent. If this flag is left
    // empty, it will log to stdout.
    LogFileFlag = cli.StringFlag{
        Name:  "log.file",
        Usage: "Tells the module where to write log entries",
    }
    // LogJSONFormat decides whether the log format is JSON or not
    LogJSONFormat = cli.BoolFlag{
        Name:  "log.json",
        Usage: "Tells the module whether log format is json or not",
        Value: true,
    }
    // LogDebugFlag prepends log messages with the call-site location
    LogDebugFlag = cli.BoolFlag{
        Name:  "log.debug",
        Usage: "Prepends log messages with call-site location (file and line number)",
    }
    // MetricsEnabled enables metrics collection and reporting
    MetricsEnabled = cli.BoolFlag{
        Name:     "metrics",
        Usage:    "Enable metrics collection and reporting",
        Category: "METRICS",
        Value:    false,
    }
    // MetricsAddr is the listening address of the metrics reporting server
    MetricsAddr = cli.StringFlag{
        Name:     "metrics.addr",
        Usage:    "Metrics reporting server listening address",
        Category: "METRICS",
        Value:    "127.0.0.1",
    }
    // MetricsPort is the listening port of the metrics reporting server
    MetricsPort = cli.IntFlag{
        Name:     "metrics.port",
        Usage:    "Metrics reporting server listening port",
        Category: "METRICS",
        Value:    6060,
    }
)
@@ -1,43 +0,0 @@
package utils

import (
    "io"
    "os"
    "path/filepath"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/log"
    "github.com/mattn/go-colorable"
    "github.com/mattn/go-isatty"
    "github.com/urfave/cli/v2"
)

// LogSetup sets up the logger
func LogSetup(ctx *cli.Context) error {
    var ostream log.Handler
    if logFile := ctx.String(LogFileFlag.Name); len(logFile) > 0 {
        fp, err := os.OpenFile(filepath.Clean(logFile), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
        if err != nil {
            utils.Fatalf("Failed to open log file", "err", err)
        }
        if ctx.Bool(LogJSONFormat.Name) {
            ostream = log.StreamHandler(io.Writer(fp), log.JSONFormat())
        } else {
            ostream = log.StreamHandler(io.Writer(fp), log.TerminalFormat(true))
        }
    } else {
        output := io.Writer(os.Stderr)
        usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
        if usecolor {
            output = colorable.NewColorableStderr()
        }
        ostream = log.StreamHandler(output, log.TerminalFormat(usecolor))
    }
    // show the call file and line number
    log.PrintOrigins(ctx.Bool(LogDebugFlag.Name))
    glogger := log.NewGlogHandler(ostream)
    // Set log level
    glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
    log.Root().SetHandler(glogger)
    return nil
}
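CommonFlags and LogSetup are intended to be wired into each command's urfave/cli app. A rough sketch of that wiring (the app name and action body are placeholders, not code from this repository):

package main

import (
    "fmt"
    "os"

    "github.com/urfave/cli/v2"

    "bridge-history-api/utils"
)

func main() {
    app := &cli.App{
        Name:   "example-service", // placeholder name
        Flags:  utils.CommonFlags,
        Before: utils.LogSetup,
        Action: func(ctx *cli.Context) error {
            // Real services would load the config and start their loops here.
            fmt.Println("config file:", ctx.String("config"))
            return nil
        },
    }
    if err := app.Run(os.Args); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}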
@@ -1,391 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
|
||||
backendabi "bridge-history-api/abi"
|
||||
"bridge-history-api/orm"
|
||||
)
|
||||
|
||||
// CachedParsedTxCalldata store parsed batch infos
|
||||
type CachedParsedTxCalldata struct {
|
||||
CallDataIndex uint64
|
||||
BatchIndices []uint64
|
||||
StartBlocks []uint64
|
||||
EndBlocks []uint64
|
||||
}
|
||||
|
||||
// ParseBackendL1EventLogs parses L1 watched events
|
||||
func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
|
||||
// Need use contract abi to parse event Log
|
||||
// Can only be tested after we have our contracts set up
|
||||
|
||||
var l1CrossMsg []*orm.CrossMsg
|
||||
var relayedMsgs []*orm.RelayedMsg
|
||||
var msgHash string
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
case backendabi.L1DepositETHSig:
|
||||
event := backendabi.DepositETH{}
|
||||
err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack DepositETH event", "err", err)
|
||||
return l1CrossMsg, relayedMsgs, err
|
||||
}
|
||||
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Amount: event.Amount.String(),
|
||||
Asset: int(orm.ETH),
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
MsgType: int(orm.Layer1Msg),
|
||||
MsgHash: msgHash,
|
||||
})
|
||||
case backendabi.L1DepositERC20Sig:
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack DepositERC20 event", "err", err)
|
||||
return l1CrossMsg, relayedMsgs, err
|
||||
}
|
||||
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Amount: event.Amount.String(),
|
||||
Asset: int(orm.ERC20),
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
MsgType: int(orm.Layer1Msg),
|
||||
MsgHash: msgHash,
|
||||
})
|
||||
case backendabi.L1DepositERC721Sig:
|
||||
event := backendabi.ERC721MessageEvent{}
|
||||
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack DepositERC721 event", "err", err)
|
||||
return l1CrossMsg, relayedMsgs, err
|
||||
}
|
||||
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Asset: int(orm.ERC721),
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
TokenIDs: event.TokenID.String(),
|
||||
MsgType: int(orm.Layer1Msg),
|
||||
MsgHash: msgHash,
|
||||
})
|
||||
case backendabi.L1DepositERC1155Sig:
|
||||
event := backendabi.ERC1155MessageEvent{}
|
||||
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
|
||||
return l1CrossMsg, relayedMsgs, err
|
||||
}
|
||||
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Asset: int(orm.ERC1155),
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
TokenIDs: event.TokenID.String(),
|
||||
Amount: event.Amount.String(),
|
||||
MsgType: int(orm.Layer1Msg),
|
||||
MsgHash: msgHash,
|
||||
})
|
||||
case backendabi.L1SentMessageEventSignature:
|
||||
event := backendabi.L1SentMessageEvent{}
|
||||
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack SentMessage event", "err", err)
|
||||
return l1CrossMsg, relayedMsgs, err
|
||||
}
|
||||
// Every deposit event is emitted after its SentMessage event, so this msg_hash is attached to the following deposit event.
|
||||
msgHash = ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).Hex()
|
||||
case backendabi.L1BatchDepositERC721Sig:
|
||||
event := backendabi.BatchERC721MessageEvent{}
|
||||
err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "BatchDepositERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
|
||||
return l1CrossMsg, relayedMsgs, err
|
||||
}
|
||||
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Asset: int(orm.ERC721),
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
|
||||
MsgType: int(orm.Layer1Msg),
|
||||
MsgHash: msgHash,
|
||||
})
|
||||
case backendabi.L1BatchDepositERC1155Sig:
|
||||
event := backendabi.BatchERC1155MessageEvent{}
|
||||
err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
|
||||
return l1CrossMsg, relayedMsgs, err
|
||||
}
|
||||
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Asset: int(orm.ERC1155),
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
|
||||
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
|
||||
MsgType: int(orm.Layer1Msg),
|
||||
MsgHash: msgHash,
|
||||
})
|
||||
case backendabi.L1RelayedMessageEventSignature:
|
||||
event := backendabi.L1RelayedMessageEvent{}
|
||||
err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack RelayedMessage event", "err", err)
|
||||
return l1CrossMsg, relayedMsgs, err
|
||||
}
|
||||
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
|
||||
MsgHash: event.MessageHash.String(),
|
||||
Height: vlog.BlockNumber,
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
return l1CrossMsg, relayedMsgs, nil
|
||||
}
|
||||
|
||||
// ParseBackendL2EventLogs parses L2 watched events
|
||||
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
|
||||
// Need use contract abi to parse event Log
|
||||
// Can only be tested after we have our contracts set up
|
||||
|
||||
var l2CrossMsg []*orm.CrossMsg
|
||||
// relayedMsgs records RelayedMessage events, used to confirm that L1 messages have been relayed on L2
|
||||
var relayedMsgs []*orm.RelayedMsg
|
||||
var l2SentMsgs []*orm.L2SentMsg
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
case backendabi.L2WithdrawETHSig:
|
||||
event := backendabi.DepositETH{}
|
||||
err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawETH event", "err", err)
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
|
||||
}
|
||||
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
|
||||
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Amount: event.Amount.String(),
|
||||
Asset: int(orm.ETH),
|
||||
Layer2Hash: vlog.TxHash.Hex(),
|
||||
MsgType: int(orm.Layer2Msg),
|
||||
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
|
||||
})
|
||||
case backendabi.L2WithdrawERC20Sig:
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
|
||||
}
|
||||
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
|
||||
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Amount: event.Amount.String(),
|
||||
Asset: int(orm.ERC20),
|
||||
Layer2Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
MsgType: int(orm.Layer2Msg),
|
||||
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
|
||||
})
|
||||
case backendabi.L2WithdrawERC721Sig:
|
||||
event := backendabi.ERC721MessageEvent{}
|
||||
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
|
||||
}
|
||||
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
|
||||
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Asset: int(orm.ERC721),
|
||||
Layer2Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
TokenIDs: event.TokenID.String(),
|
||||
MsgType: int(orm.Layer2Msg),
|
||||
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
|
||||
})
|
||||
case backendabi.L2WithdrawERC1155Sig:
|
||||
event := backendabi.ERC1155MessageEvent{}
|
||||
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
|
||||
}
|
||||
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
|
||||
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Asset: int(orm.ERC1155),
|
||||
Layer2Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
TokenIDs: event.TokenID.String(),
|
||||
Amount: event.Amount.String(),
|
||||
MsgType: int(orm.Layer2Msg),
|
||||
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
|
||||
})
|
||||
case backendabi.L2BatchWithdrawERC721Sig:
|
||||
event := backendabi.BatchERC721MessageEvent{}
|
||||
err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
|
||||
}
|
||||
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
|
||||
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Asset: int(orm.ERC721),
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
MsgType: int(orm.Layer2Msg),
|
||||
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
|
||||
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
|
||||
})
|
||||
case backendabi.L2BatchWithdrawERC1155Sig:
|
||||
event := backendabi.BatchERC1155MessageEvent{}
|
||||
err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
|
||||
}
|
||||
l2SentMsgs[len(l2SentMsgs)-1].OriginalSender = event.From.Hex()
|
||||
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
|
||||
Height: vlog.BlockNumber,
|
||||
Sender: event.From.String(),
|
||||
Target: event.To.String(),
|
||||
Asset: int(orm.ERC1155),
|
||||
Layer1Hash: vlog.TxHash.Hex(),
|
||||
Layer1Token: event.L1Token.Hex(),
|
||||
Layer2Token: event.L2Token.Hex(),
|
||||
MsgType: int(orm.Layer2Msg),
|
||||
TokenIDs: convertBigIntArrayToString(event.TokenIDs),
|
||||
TokenAmounts: convertBigIntArrayToString(event.TokenAmounts),
|
||||
MsgHash: l2SentMsgs[len(l2SentMsgs)-1].MsgHash,
|
||||
})
|
||||
case backendabi.L2SentMessageEventSignature:
|
||||
event := backendabi.L2SentMessageEvent{}
|
||||
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack SentMessage event", "err", err)
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
|
||||
}
|
||||
// Every withdraw event is emitted after its SentMessage event, so this msg_hash is attached to the following withdraw event.
|
||||
msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
|
||||
l2SentMsgs = append(l2SentMsgs,
|
||||
&orm.L2SentMsg{
|
||||
Sender: event.Sender.Hex(),
|
||||
TxHash: vlog.TxHash.Hex(),
|
||||
Target: event.Target.Hex(),
|
||||
Value: event.Value.String(),
|
||||
MsgHash: msgHash.Hex(),
|
||||
Height: vlog.BlockNumber,
|
||||
Nonce: event.MessageNonce.Uint64(),
|
||||
MsgData: hexutil.Encode(event.Message),
|
||||
})
|
||||
case backendabi.L2RelayedMessageEventSignature:
|
||||
event := backendabi.L2RelayedMessageEvent{}
|
||||
err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack RelayedMessage event", "err", err)
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, err
|
||||
}
|
||||
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
|
||||
MsgHash: event.MessageHash.String(),
|
||||
Height: vlog.BlockNumber,
|
||||
Layer2Hash: vlog.TxHash.Hex(),
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
return l2CrossMsg, relayedMsgs, l2SentMsgs, nil
|
||||
}
|
||||
|
||||
// ParseBatchInfoFromScrollChain parses ScrollChain events
|
||||
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
|
||||
var rollupBatches []*orm.RollupBatch
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
case backendabi.L1CommitBatchEventSignature:
|
||||
event := backendabi.L1CommitBatchEvent{}
|
||||
err := UnpackLog(backendabi.ScrollChainABI, &event, "CommitBatch", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack CommitBatch event", "err", err)
|
||||
return rollupBatches, err
|
||||
}
|
||||
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
|
||||
if err != nil || isPending {
|
||||
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
|
||||
return rollupBatches, err
|
||||
}
|
||||
index, startBlock, endBlock, err := GetBatchRangeFromCalldataV2(commitTx.Data())
|
||||
if err != nil {
|
||||
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().Hex(), "height", vlog.BlockNumber)
|
||||
return rollupBatches, err
|
||||
}
|
||||
rollupBatches = append(rollupBatches, &orm.RollupBatch{
|
||||
CommitHeight: vlog.BlockNumber,
|
||||
BatchIndex: index,
|
||||
BatchHash: event.BatchHash.Hex(),
|
||||
StartBlockNumber: startBlock,
|
||||
EndBlockNumber: endBlock,
|
||||
})
|
||||
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
return rollupBatches, nil
|
||||
}
|
||||
|
||||
func convertBigIntArrayToString(array []*big.Int) string {
|
||||
stringArray := make([]string, len(array))
|
||||
for i, num := range array {
|
||||
stringArray[i] = num.String()
|
||||
}
|
||||
|
||||
result := strings.Join(stringArray, ", ")
|
||||
return result
|
||||
}
|
||||
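The parsers above only consume a []types.Log slice; the surrounding fetcher is expected to pull logs per block range and hand them over in order, since the SentMessage handling relies on log ordering within each transaction. A hedged sketch of that call pattern (the function name and address list are assumptions), written as if it sat next to the parsers in the same package with the usual go-ethereum imports (context, math/big, github.com/ethereum/go-ethereum, common, ethclient):

// Hypothetical helper: filter one L1 block range and parse the resulting logs.
func fetchAndParseL1(ctx context.Context, client *ethclient.Client, start, end uint64, addresses []common.Address) ([]*orm.CrossMsg, []*orm.RelayedMsg, error) {
    query := ethereum.FilterQuery{
        FromBlock: new(big.Int).SetUint64(start),
        ToBlock:   new(big.Int).SetUint64(end),
        Addresses: addresses, // messenger and gateway contract addresses
    }
    logs, err := client.FilterLogs(ctx, query)
    if err != nil {
        return nil, nil, err
    }
    // Logs come back ordered by block and log index, which the parser relies on.
    return ParseBackendL1EventLogs(logs)
}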
@@ -1,119 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
|
||||
backendabi "bridge-history-api/abi"
|
||||
)
|
||||
|
||||
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values
|
||||
func Keccak2(a common.Hash, b common.Hash) common.Hash {
|
||||
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
|
||||
}
|
||||
|
||||
// GetSafeBlockNumber returns the safe block number, i.e. the current block number minus the required confirmations
|
||||
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
|
||||
number, err := client.BlockNumber(ctx)
|
||||
if err != nil || number <= confirmations {
|
||||
return 0, err
|
||||
}
|
||||
number = number - confirmations
|
||||
return number, nil
|
||||
}
|
||||
|
||||
// UnpackLog unpacks a retrieved log into the provided output structure.
|
||||
// @todo: add unit test.
|
||||
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
if log.Topics[0] != c.Events[event].ID {
|
||||
return fmt.Errorf("event signature mismatch")
|
||||
}
|
||||
if len(log.Data) > 0 {
|
||||
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var indexed abi.Arguments
|
||||
for _, arg := range c.Events[event].Inputs {
|
||||
if arg.Indexed {
|
||||
indexed = append(indexed, arg)
|
||||
}
|
||||
}
|
||||
return abi.ParseTopics(out, indexed, log.Topics[1:])
|
||||
}
|
||||
|
||||
// ComputeMessageHash compute the message hash
|
||||
func ComputeMessageHash(
|
||||
sender common.Address,
|
||||
target common.Address,
|
||||
value *big.Int,
|
||||
messageNonce *big.Int,
|
||||
message []byte,
|
||||
) common.Hash {
|
||||
data, _ := backendabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
|
||||
return common.BytesToHash(crypto.Keccak256(data))
|
||||
}
|
||||
|
||||
type commitBatchArgs struct {
|
||||
Version uint8
|
||||
ParentBatchHeader []byte
|
||||
Chunks [][]byte
|
||||
SkippedL1MessageBitmap []byte
|
||||
}
|
||||
|
||||
// GetBatchRangeFromCalldataV2 find the block range from calldata, both inclusive.
|
||||
func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error) {
|
||||
method := backendabi.ScrollChainV2ABI.Methods["commitBatch"]
|
||||
values, err := method.Inputs.Unpack(calldata[4:])
|
||||
if err != nil {
|
||||
// special case: import genesis batch
|
||||
method = backendabi.ScrollChainV2ABI.Methods["importGenesisBatch"]
|
||||
_, err2 := method.Inputs.Unpack(calldata[4:])
|
||||
if err2 == nil {
|
||||
// genesis batch
|
||||
return 0, 0, 0, nil
|
||||
}
|
||||
// none of "commitBatch" and "importGenesisBatch" match, give up
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
args := commitBatchArgs{}
|
||||
err = method.Inputs.Copy(&args, values)
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
var startBlock uint64
|
||||
var finishBlock uint64
|
||||
|
||||
// decode batchIndex from ParentBatchHeader
|
||||
if len(args.ParentBatchHeader) < 9 {
|
||||
return 0, 0, 0, errors.New("invalid parent batch header")
|
||||
}
|
||||
batchIndex := binary.BigEndian.Uint64(args.ParentBatchHeader[1:9]) + 1
|
||||
|
||||
// decode blocks from chunk and assume that there's no empty chunk
|
||||
// | 1 byte | 60 bytes | ... | 60 bytes |
|
||||
// | num blocks | block 1 | ... | block n |
|
||||
if len(args.Chunks) == 0 {
|
||||
return 0, 0, 0, errors.New("invalid chunks")
|
||||
}
|
||||
chunk := args.Chunks[0]
|
||||
block := chunk[1:61] // first block in chunk
|
||||
startBlock = binary.BigEndian.Uint64(block[0:8])
|
||||
|
||||
chunk = args.Chunks[len(args.Chunks)-1]
|
||||
lastBlockIndex := int(chunk[0]) - 1
|
||||
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
|
||||
finishBlock = binary.BigEndian.Uint64(block[0:8])
|
||||
|
||||
return batchIndex, startBlock, finishBlock, err
|
||||
}
|
||||
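GetBatchRangeFromCalldataV2 leans on two fixed byte layouts: bytes [1, 9) of the parent batch header hold the parent batch index (big-endian), and each chunk is one block-count byte followed by 60-byte block contexts whose first 8 bytes are the block number. A small synthetic example of that decoding (not real calldata, only the layout; imports encoding/binary and fmt):

// Synthetic illustration of the layouts decoded above.
func exampleBatchLayout() {
    // Parent batch header: one version byte, then the big-endian parent batch index (41).
    parentHeader := make([]byte, 9)
    binary.BigEndian.PutUint64(parentHeader[1:9], 41)
    batchIndex := binary.BigEndian.Uint64(parentHeader[1:9]) + 1 // committed batch index: 42

    // One chunk: a block-count byte, then two 60-byte block contexts for blocks 100 and 101.
    chunk := make([]byte, 1+2*60)
    chunk[0] = 2
    binary.BigEndian.PutUint64(chunk[1:9], 100)
    binary.BigEndian.PutUint64(chunk[61:69], 101)

    startBlock := binary.BigEndian.Uint64(chunk[1:9])
    lastBlockIndex := int(chunk[0]) - 1
    endBlock := binary.BigEndian.Uint64(chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+8])
    fmt.Println(batchIndex, startBlock, endBlock) // 42 100 101
}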
@@ -1,43 +0,0 @@
|
||||
package utils_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"bridge-history-api/utils"
|
||||
)
|
||||
|
||||
func TestKeccak2(t *testing.T) {
|
||||
a := common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0")
|
||||
b := common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c")
|
||||
c := utils.Keccak2(a, b)
|
||||
assert.NotEmpty(t, c)
|
||||
assert.NotEqual(t, a, c)
|
||||
assert.NotEqual(t, b, c)
|
||||
assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
|
||||
}
|
||||
|
||||
func TestGetBatchRangeFromCalldataV2(t *testing.T) {
|
||||
// single chunk
|
||||
batchIndex, start, finish, err := utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003d0100000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000100000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, start, uint64(1))
|
||||
assert.Equal(t, finish, uint64(1))
|
||||
assert.Equal(t, batchIndex, uint64(1))
|
||||
|
||||
// multiple chunk
|
||||
batchIndex, start, finish, err = utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000007900000000000000000100000000000000010000000000000001038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000004c01000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000010000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b403000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000300000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00050000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c01000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, start, uint64(10))
|
||||
assert.Equal(t, finish, uint64(20))
|
||||
assert.Equal(t, batchIndex, uint64(2))
|
||||
|
||||
// genesis batch
|
||||
batchIndex, start, finish, err = utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("3fdeecb200000000000000000000000000000000000000000000000000000000000000402dcb5308098d24a37fc1487a229fcedb09fa4343ede39cbad365bc925535bb09000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000c252bc9780c4d83cf11f14b8cd03c92c4d18ce07710ba836d31d12da216c8330000000000000000000000000000000000000000000000000000000000000000000000000000000"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, start, uint64(0))
|
||||
assert.Equal(t, finish, uint64(0))
|
||||
assert.Equal(t, batchIndex, uint64(0))
|
||||
}
|
||||
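Keccak2 combines two 32-byte hashes into one, the shape of combiner typically used for binary Merkle hashing such as a withdraw root. Purely as an illustration (merkleRootOfFour is hypothetical, not part of this diff), folding four leaves into a root:

// Hypothetical illustration: a two-level tree built from Keccak2.
func merkleRootOfFour(leaves [4]common.Hash) common.Hash {
    left := utils.Keccak2(leaves[0], leaves[1])
    right := utils.Keccak2(leaves[2], leaves[3])
    return utils.Keccak2(left, right)
}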
@@ -1,7 +1,7 @@
# Source: https://github.com/golangci/golangci-lint/blob/master/.golangci.example.yml
# options for analysis running
run:
  # default concurrency is a available CPU number
  # default concurrency is the available CPU number
  concurrency: 4

  # timeout for analysis, e.g. 30s, 5m, default is 1m
21
build/dockerfiles/bridgehistoryapi-api.Dockerfile
Normal file
@@ -0,0 +1,21 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
COPY ./bridge-history-api/go.* ./
|
||||
RUN go mod download -x
|
||||
|
||||
# Build bridgehistoryapi-api
|
||||
FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge-history-api/cmd/api && go build -v -p 4 -o /bin/bridgehistoryapi-api
|
||||
|
||||
# Pull bridgehistoryapi-api into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/bridgehistoryapi-api /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["bridgehistoryapi-api"]
|
||||
@@ -1,21 +0,0 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
COPY ./bridge-history-api/go.* ./
|
||||
RUN go mod download -x
|
||||
|
||||
# Build bridgehistoryapi-cross-msg-fetcher
|
||||
FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge-history-api/cmd/cross_msg_fetcher && go build -v -p 4 -o /bin/bridgehistoryapi-cross-msg-fetcher
|
||||
|
||||
# Pull bridgehistoryapi-cross-msg-fetcher into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/bridgehistoryapi-cross-msg-fetcher /bin/
|
||||
|
||||
ENTRYPOINT ["bridgehistoryapi-cross-msg-fetcher"]
|
||||
@@ -14,7 +14,6 @@ RUN --mount=target=. \
|
||||
|
||||
# Pull db_cli into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/db_cli /bin/
|
||||
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["db_cli"]
|
||||
21
build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
Normal file
@@ -0,0 +1,21 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
COPY ./bridge-history-api/go.* ./
|
||||
RUN go mod download -x
|
||||
|
||||
# Build bridgehistoryapi-fetcher
|
||||
FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge-history-api/cmd/fetcher && go build -v -p 4 -o /bin/bridgehistoryapi-fetcher
|
||||
|
||||
# Pull bridgehistoryapi-fetcher into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/bridgehistoryapi-fetcher /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["bridgehistoryapi-fetcher"]
|
||||
@@ -1,21 +0,0 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
COPY ./bridge-history-api/go.* ./
|
||||
RUN go mod download -x
|
||||
|
||||
# Build bridgehistoryapi-server
|
||||
FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge-history-api/cmd/backend_server && go build -v -p 4 -o /bin/bridgehistoryapi-server
|
||||
|
||||
# Pull bridgehistoryapi-server into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/bridgehistoryapi-server /bin/
|
||||
|
||||
ENTRYPOINT ["bridgehistoryapi-server"]
|
||||
@@ -1,5 +1,5 @@
|
||||
# Build libzkp dependency
|
||||
FROM scrolltech/go-rust-builder:go-1.19-rust-nightly-2022-12-10 as chef
|
||||
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
|
||||
WORKDIR app
|
||||
|
||||
FROM chef as planner
|
||||
@@ -17,7 +17,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
|
||||
|
||||
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.19-rust-nightly-2022-12-10 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
@@ -36,7 +36,7 @@ COPY . .
|
||||
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
|
||||
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
|
||||
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
|
||||
RUN cd ./coordinator && make coordinator_skip_libzkp && mv ./build/bin/coordinator /bin/coordinator && mv internal/logic/verifier/lib /bin/
|
||||
RUN cd ./coordinator && make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
|
||||
|
||||
# Pull coordinator into a second stage deploy alpine container
|
||||
FROM ubuntu:20.04
|
||||
@@ -44,7 +44,7 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/li
|
||||
# ENV CHAIN_ID=534353
|
||||
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
|
||||
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
|
||||
COPY --from=builder /bin/coordinator /bin/
|
||||
RUN /bin/coordinator --version
|
||||
|
||||
ENTRYPOINT ["/bin/coordinator"]
|
||||
COPY --from=builder /bin/coordinator_api /bin/
|
||||
RUN /bin/coordinator_api --version
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["/bin/coordinator_api"]
|
||||
25
build/dockerfiles/coordinator-cron.Dockerfile
Normal file
@@ -0,0 +1,25 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
RUN go mod download -x
|
||||
|
||||
# Build coordinator
|
||||
FROM base as builder
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/coordinator/cmd/cron/ && go build -v -p 4 -o /bin/coordinator_cron
|
||||
|
||||
# Pull coordinator into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
COPY --from=builder /bin/coordinator_cron /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["coordinator_cron"]
|
||||
@@ -0,0 +1,6 @@
|
||||
assets/
|
||||
contracts/
|
||||
docs/
|
||||
l2geth/
|
||||
rpc-gateway/
|
||||
*target/*
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
@@ -21,7 +21,6 @@ RUN --mount=target=. \
|
||||
|
||||
# Pull db_cli into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/db_cli /bin/
|
||||
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["db_cli"]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
@@ -21,7 +21,6 @@ RUN --mount=target=. \
|
||||
|
||||
# Pull event_watcher into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/event_watcher /bin/
|
||||
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["event_watcher"]
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
@@ -21,7 +21,6 @@ RUN --mount=target=. \
|
||||
|
||||
# Pull gas_oracle into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/gas_oracle /bin/
|
||||
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["gas_oracle"]
|
||||
Some files were not shown because too many files have changed in this diff.