Compare commits


1 Commit

Author | SHA1 | Message | Date
Péter Garamvölgyi | 61c44037e9 | add 30 batch per tx limit | 2023-05-31 20:38:30 +02:00
651 changed files with 2353309 additions and 68945 deletions

View File

@@ -1,9 +1,9 @@
### Purpose or design rationale of this PR
## 1. Purpose or design rationale of this PR
*Describe your change. Make sure to answer these three questions: What does this PR do? Why does it do it? How does it do it?*
...
### PR title
## 2. PR title
Your PR title must follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) (as we are doing squash merge for each PR), so it must start with one of the following [types](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#type):
@@ -18,17 +18,17 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits
- [ ] test: Adding missing tests or correcting existing tests
### Deployment tag versioning
## 3. Deployment tag versioning
Has `tag` in `common/version.go` been updated or have you added `bump-version` label to this PR?
Has `tag` in `common/version.go` been updated?
- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
- [ ] This PR doesn't involve a new deployment, git tag, docker image tag
- [ ] Yes
### Breaking change label
## 4. Breaking change label
Does this PR have the `breaking-change` label?
- [ ] No, this PR is not a breaking change
- [ ] This PR is not a breaking change
- [ ] Yes
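
For illustration, a title that satisfies the conventional-commit rule in section 2 of this template could look like the following for the single commit in this comparison (the `bridge` scope is a hypothetical choice):

```
feat(bridge): add 30 batch per tx limit
```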

View File

@@ -1,37 +0,0 @@
import { URL } from "url";
import { readFileSync, writeFileSync } from "fs";
const versionFilePath = new URL(
"../../common/version/version.go",
import.meta.url
).pathname;
const versionFileContent = readFileSync(versionFilePath, { encoding: "utf-8" });
const currentVersion = versionFileContent.match(
/var tag = "(?<version>v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+))"/
);
try {
parseInt(currentVersion.groups.major);
parseInt(currentVersion.groups.minor);
parseInt(currentVersion.groups.patch);
} catch (err) {
console.error(new Error("Failed to parse version in version.go file"));
throw err;
}
// prettier-ignore
const newVersion = `v${currentVersion.groups.major}.${currentVersion.groups.minor}.${parseInt(currentVersion.groups.patch) + 1}`;
console.log(
`Bump version from ${currentVersion.groups.version} to ${newVersion}`
);
writeFileSync(
versionFilePath,
versionFileContent.replace(
`var tag = "${currentVersion.groups.version}"`,
`var tag = "${newVersion}"`
)
);
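
For reference, the regex above assumes `common/version/version.go` declares the tag on a single line of the form `var tag = "vX.Y.Z"`; a quick way to inspect the current value locally (a sketch, with an illustrative version number in the comment):

```bash
# Prints the line the bump script rewrites, e.g.:  var tag = "v1.2.3"
grep -E 'var tag = "v[0-9]+\.[0-9]+\.[0-9]+"' common/version/version.go
```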

79  .github/workflows/bridge.yml vendored Normal file
View File

@@ -0,0 +1,79 @@
name: Bridge
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'bridge/**'
- '.github/workflows/bridge.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'bridge/**'
- '.github/workflows/bridge.yml'
defaults:
run:
working-directory: 'bridge'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker
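
The `goimports-lint` job above can be reproduced locally before pushing; a minimal sketch, assuming it is run from the repository root:

```bash
go install golang.org/x/tools/cmd/goimports@latest   # if goimports is not already installed
cd bridge
goimports -local scroll-tech/bridge/ -w .
go mod tidy
# CI fails if either command left the working tree dirty:
test -z "$(git status --porcelain)" || { echo "goimports / go mod tidy produced changes"; exit 1; }
```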

View File

@@ -1,4 +1,4 @@
name: BridgeHistoryAPI
name: BridgeHistoryApi
on:
push:
@@ -25,20 +25,20 @@ defaults:
working-directory: 'bridge-history-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
# check:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# steps:
# - name: Install Go
# uses: actions/setup-go@v2
# with:
# go-version: 1.20.x
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Lint
# run: |
# rm -rf $HOME/.cache/golangci-lint
# make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -51,13 +51,8 @@ jobs:
uses: actions/checkout@v2
- name: Test
run: |
go get ./...
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: bridge-history-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -78,3 +73,4 @@ jobs:
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi

View File

@@ -1,63 +0,0 @@
name: Bump version
on:
pull_request:
branches: [ develop ]
types:
- opened
- reopened
- synchronize
- ready_for_review
- labeled
jobs:
try-to-bump:
if: contains(github.event.pull_request.labels.*.name, 'bump-version')
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- name: check diff
id: check_diff
run: |
set -euo pipefail
# fetch develop branch so that we can diff against later
git fetch origin develop
echo 'checking version changes in diff...'
# check if version changed in version.go
# note: the grep will fail if you use \d instead of [0-9]
git diff HEAD..origin/develop --text --no-ext-diff --unified=0 --no-prefix common/version/version.go | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true
exit_code=$?
# auto bump if version is not bumped manually
echo '> require auto version bump?'
if [ $exit_code -eq 0 ]; then
echo '> no, already bumped'
echo "result=no-bump" >> "$GITHUB_OUTPUT"
else
echo '> yes'
echo "result=bump" >> "$GITHUB_OUTPUT"
fi
- name: Install Node.js 16
if: steps.check_diff.outputs.result == 'bump'
uses: actions/setup-node@v3
with:
node-version: 16
- name: bump version in common/version/version.go
if: steps.check_diff.outputs.result == 'bump'
run: node .github/scripts/bump_version_dot_go.mjs
# Commits made by this Action do not trigger new Workflow runs
- uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
if: steps.check_diff.outputs.result == 'bump'
with:
skip_fetch: true # already did fetch in check diff
file_pattern: "common/version/version.go"
commit_message: "chore: auto version bump[bot]"
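
The `&& true` / `exit_code=$?` pattern in the `check diff` step above is what lets the job inspect grep's exit status without `set -euo pipefail` aborting the step; a standalone sketch of the same idiom (the diff line is simulated here):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Simulated `git diff` output; in the workflow this comes from
# `git diff HEAD..origin/develop ... common/version/version.go`.
diff_output='+var tag = "v1.2.3"'

# Under `set -e`, a bare failing grep would abort the step. Because grep is the
# non-final command of an `&&` list, its failure does not trigger an exit, and
# $? afterwards still carries the pipeline's status (0 = match found, 1 = no match).
echo "$diff_output" | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true
exit_code=$?

if [ "$exit_code" -eq 0 ]; then
  echo "result=no-bump   # version was already bumped manually"
else
  echo "result=bump      # trigger the auto-bump script"
fi
```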

View File

@@ -9,7 +9,6 @@ on:
- alpha
paths:
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/common.yml'
pull_request:
types:
@@ -19,9 +18,12 @@ on:
- ready_for_review
paths:
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/common.yml'
defaults:
run:
working-directory: 'common'
jobs:
check:
if: github.event.pull_request.draft == false
@@ -35,7 +37,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
@@ -43,7 +45,6 @@ jobs:
with:
workspaces: "common/libzkp/impl -> target"
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
@@ -54,52 +55,16 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'common'
run: goimports -local scroll-tech/common/ -w .
- name: Run go mod tidy
working-directory: 'common'
run: go mod tidy
- run: goimports -local scroll-tech/common/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'common'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test common packages
working-directory: 'common'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: common

View File

@@ -82,15 +82,17 @@ jobs:
run : forge coverage --report lcov
- name : Prune coverage
run : lcov --rc branch_coverage=1 --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*'
run : lcov --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*'
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Report code coverage
uses: zgosalvez/github-actions-report-lcov@v3
with:
files: contracts/lcov.info.pruned
flags: contracts
coverage-files: contracts/lcov.info.pruned
minimum-coverage: 0
artifact-name: code-coverage-report
github-token: ${{ secrets.GITHUB_TOKEN }}
working-directory: contracts
update-comment: true
hardhat:
if: github.event.pull_request.draft == false

View File

@@ -9,9 +9,6 @@ on:
- alpha
paths:
- 'coordinator/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/coordinator.yml'
pull_request:
types:
@@ -21,11 +18,12 @@ on:
- ready_for_review
paths:
- 'coordinator/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/coordinator.yml'
defaults:
run:
working-directory: 'coordinator'
jobs:
check:
if: github.event.pull_request.draft == false
@@ -39,11 +37,10 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
working-directory: 'coordinator'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
@@ -54,20 +51,15 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'coordinator'
run: goimports -local scroll-tech/coordinator/ -w .
- name: Run go mod tidy
working-directory: 'coordinator'
run: go mod tidy
- run: goimports -local scroll-tech/coordinator/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'coordinator'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
@@ -88,35 +80,3 @@ jobs:
# push: false
# # cache-from: type=gha,scope=${{ github.workflow }}
# # cache-to: type=gha,scope=${{ github.workflow }}
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: coordinator

View File

@@ -9,8 +9,6 @@ on:
- alpha
paths:
- 'database/**'
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/database.yml'
pull_request:
types:
@@ -20,10 +18,12 @@ on:
- ready_for_review
paths:
- 'database/**'
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/database.yml'
defaults:
run:
working-directory: 'database'
jobs:
check:
if: github.event.pull_request.draft == false
@@ -32,11 +32,10 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
working-directory: 'database'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
@@ -47,52 +46,16 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'database'
run: goimports -local scroll-tech/database/ -w .
- name: Run go mod tidy
working-directory: 'database'
run: go mod tidy
- run: goimports -local scroll-tech/database/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'database'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test database packages
working-directory: 'database'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: database

83  .github/workflows/docker.yaml vendored Normal file
View File

@@ -0,0 +1,83 @@
name: Docker
on:
push:
tags:
- v**
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push event_watcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
push: true
tags: scrolltech/event-watcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
push: true
tags: scrolltech/gas-oracle:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push msg_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/msg_relayer.Dockerfile
push: true
tags: scrolltech/msg-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
push: true
tags: scrolltech/rollup-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push bridgehistoryapi-cross-msg-fetcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-cross-msg-fetcher.Dockerfile
push: true
tags: scrolltech/bridgehistoryapi-cross-msg-fetcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push bridgehistoryapi-server docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-server.Dockerfile
push: true
tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator.Dockerfile
push: true
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
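
As the `on: push: tags: - v**` trigger above implies, publishing these images is driven by pushing a version tag; for example (the tag value is hypothetical):

```bash
git tag v4.1.0
git push origin v4.1.0   # triggers the build-and-push job for every image above
```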

View File

@@ -1,169 +0,0 @@
name: Docker
on:
push:
tags:
- v**
jobs:
event_watcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push event_watcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
push: true
tags: |
scrolltech/event-watcher:${{github.ref_name}}
scrolltech/event-watcher:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
gas_oracle:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
push: true
tags: |
scrolltech/gas-oracle:${{github.ref_name}}
scrolltech/gas-oracle:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
rollup_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
push: true
tags: |
scrolltech/rollup-relayer:${{github.ref_name}}
scrolltech/rollup-relayer:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-fetcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
push: true
tags: |
scrolltech/bridgehistoryapi-fetcher:${{github.ref_name}}
scrolltech/bridgehistoryapi-fetcher:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-api docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
push: true
tags: |
scrolltech/bridgehistoryapi-api:${{github.ref_name}}
scrolltech/bridgehistoryapi-api:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: |
scrolltech/coordinator-api:${{github.ref_name}}
scrolltech/coordinator-api:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator-cron:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
push: true
tags: |
scrolltech/coordinator-cron:${{github.ref_name}}
scrolltech/coordinator-cron:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

View File

@@ -1,43 +0,0 @@
name: Integration
on:
push:
branches:
- main
- staging
- develop
- alpha
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
jobs:
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
make -C rollup mock_abi
make -C common/bytecode all
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...

View File

@@ -1,59 +0,0 @@
name: Intermediate Docker
on:
workflow_dispatch:
inputs:
GO_VERSION:
description: 'Go version'
required: true
type: string
default: '1.20'
RUST_VERSION:
description: 'Rust toolchain version'
required: true
type: string
default: 'nightly-2022-12-10'
PYTHON_VERSION:
description: 'Python version'
required: false
type: string
default: '3.10'
CUDA_VERSION:
description: 'Cuda version'
required: false
type: string
default: '11.7.1'
defaults:
run:
working-directory: 'build/dockerfiles/intermediate'
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build
run: |
make all
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Publish
run: |
make publish
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
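
Since this workflow only has a `workflow_dispatch` trigger, it must be started manually. A sketch using the GitHub CLI, with the workflow file name assumed and the input values taken from the defaults above:

```bash
gh workflow run intermediate-docker.yml \
  -f GO_VERSION='1.20' \
  -f RUST_VERSION='nightly-2022-12-10' \
  -f PYTHON_VERSION='3.10' \
  -f CUDA_VERSION='11.7.1'
```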

View File

@@ -1,4 +1,4 @@
name: Prover
name: Roller
on:
push:
@@ -8,8 +8,8 @@ on:
- develop
- alpha
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
- 'roller/**'
- '.github/workflows/roller.yml'
pull_request:
types:
- opened
@@ -17,37 +17,18 @@ on:
- synchronize
- ready_for_review
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
- 'roller/**'
- '.github/workflows/roller.yml'
defaults:
run:
working-directory: 'prover'
working-directory: 'roller'
jobs:
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
@@ -56,7 +37,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
@@ -65,7 +46,8 @@ jobs:
workspaces: "common/libzkp/impl -> target"
- name: Test
run: |
make prover
make roller
go test -tags="mock_prover" -v ./...
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -73,7 +55,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
@@ -87,12 +69,12 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover/ -w .
- run: goimports -local scroll-tech/roller/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy

View File

@@ -1,123 +0,0 @@
name: Rollup
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'rollup/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/rollup.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'rollup/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/rollup.yml'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
working-directory: 'rollup'
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
run: goimports -local scroll-tech/rollup/ -w .
working-directory: 'rollup'
- name: Run go mod tidy
run: go mod tidy
working-directory: 'rollup'
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'rollup'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
make -C rollup mock_abi
- name: Build rollup binaries
working-directory: 'rollup'
run: |
make rollup_bins
- name: Test rollup packages
working-directory: 'rollup'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: rollup
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker

17  .gitignore vendored
View File

@@ -1,22 +1,9 @@
# Asset files
.idea
assets/params*
assets/seed
# Built binaries
build/bin
coverage.txt
build/bin
*.integration.txt
# Visual Studio Code
.vscode
# IntelliJ
.idea
# MacOS
.DS_Store
# misc
sftp-config.json
*~

3  .gitmodules vendored
View File

@@ -1,6 +1,9 @@
[submodule "l2geth"]
path = l2geth
url = git@github.com:scroll-tech/go-ethereum.git
[submodule "rpc-gateway"]
path = rpc-gateway
url = git@github.com:scroll-tech/rpc-gateway.git
[submodule "contracts/lib/ds-test"]
path = contracts/lib/ds-test
url = https://github.com/dapphub/ds-test

View File

@@ -1,128 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct that could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
contributor@scroll.io.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

View File

@@ -1,43 +0,0 @@
## Contributing
[fork]: /fork
[pr]: /compare
[style]: https://standardjs.com/
[code-of-conduct]: CODE_OF_CONDUCT.md
Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for keeping it great.
Please note that this project is released with a [Contributor Code of Conduct][code-of-conduct]. By participating in this project you agree to abide by its terms.
## Contribute to Scroll
Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing Dev Tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits, please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.
## Issues and PRs
If you have suggestions for how this project could be improved, or want to report a bug, open an issue! We'd love all and any contributions. If you have questions, too, we'd love to hear them.
We'd also love PRs. If you're thinking of a large PR, we advise opening up an issue first to talk about it, though! Look at the links below if you're not sure how to open a PR.
## Submitting a pull request
1. [Fork][fork] and clone the repository.
2. Create a new branch: `git checkout -b my-branch-name`.
3. Make your change, add tests, and make sure the tests still pass.
4. Format your code in scroll home directory: `make lint && make fmt`
5. Push to your fork and [submit a pull request][pr].
6. Pat yourself on the back and wait for your pull request to be reviewed and merged.
Here are a few things you can do that will increase the likelihood of your pull request being accepted:
- Write and update tests.
- Keep your changes as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
Work in Progress pull requests are also welcome to get feedback early on, or if there is something that blocked you.
## Resources
- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
- [GitHub Help](https://help.github.com)
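
A condensed, illustrative version of the submission steps above (the fork URL and branch name are placeholders):

```bash
git clone git@github.com:<your-fork>/scroll.git && cd scroll
git checkout -b my-branch-name
# ...make your change and add tests...
make lint && make fmt            # format and lint from the repo root, as step 4 requires
git push origin my-branch-name   # then open a pull request against the upstream repo
```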

99  Jenkinsfile vendored Normal file
View File

@@ -0,0 +1,99 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
CHAIN_ID='534353'
// LOG_DOCKER = 'true'
}
stages {
stage('Build') {
parallel {
stage('Build Prerequisite') {
steps {
sh 'make dev_docker'
sh 'make -C bridge mock_abi'
sh 'make -C common/bytecode all'
}
}
stage('Check Bridge Compilation') {
steps {
sh 'make -C bridge bridge_bins'
}
}
stage('Check Coordinator Compilation') {
steps {
sh 'export PATH=/home/ubuntu/go/bin:$PATH'
sh 'make -C coordinator coordinator'
}
}
stage('Check Database Compilation') {
steps {
sh 'make -C database db_cli'
}
}
stage('Check Database Docker Build') {
steps {
sh 'make -C database docker'
}
}
}
}
stage('Parallel Test') {
parallel{
stage('Race test common package') {
steps {
sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic scroll-tech/common/...'
}
}
stage('Race test bridge package') {
steps {
sh "cd ./bridge && ../build/run_tests.sh bridge"
}
}
stage('Race test coordinator package') {
steps {
sh 'cd ./coordinator && go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=../coverage.coordinator.txt -covermode=atomic ./...'
}
}
stage('Race test database package') {
steps {
sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic scroll-tech/database/...'
}
}
stage('Integration test') {
steps {
sh 'go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...'
}
}
}
}
stage('Compare Coverage') {
steps {
sh "./build/post-test-report-coverage.sh"
script {
currentBuild.result = 'SUCCESS'
}
step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
}
}
}
post {
always {
publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
}
}
}

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2022-2023 Scroll
Copyright (c) 2022 Scroll
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,46 +1,34 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
.PHONY: check update dev_docker build_test_docker run_test_docker clean
L2GETH_TAG=scroll-v5.1.6
ZKP_VERSION=release-1220
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
update:
go work sync
cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
lint: ## The code's format and security checks.
make -C rollup lint
make -C bridge lint
make -C common lint
make -C coordinator lint
make -C database lint
make -C prover lint
make -C roller lint
make -C bridge-history-api lint
fmt: ## format the code
update: ## update dependencies
go work sync
cd $(PWD)/bridge-history-api/ && go mod tidy
cd $(PWD)/common/ && go mod tidy
cd $(PWD)/coordinator/ && go mod tidy
cd $(PWD)/database/ && go mod tidy
cd $(PWD)/prover/ && go mod tidy
cd $(PWD)/rollup/ && go mod tidy
cd $(PWD)/tests/integration-test/ && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v3.1.12 && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/prover/ -w .
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/tests/integration-test/ -w .
goimports -local $(PWD)/roller/ -w .
dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
@@ -52,5 +40,16 @@ build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app scroll_test_image
test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
mkdir -p test_params
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
cd ./roller && make test-gpu-prover
rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
cd ./coordinator && make test-gpu-verifier
clean: ## Empty out the bin folder
@rm -rf build/bin
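
Typical invocations of the targets defined above (a sketch; all of them are run from the repository root):

```bash
make help        # list every documented target
make update      # sync go.work and bump scroll-tech/go-ethereum across the modules
make dev_docker  # build the local l1geth / l2geth images used by the tests
make test_zkp    # download params and run the prover -> verifier round trip
```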

View File

@@ -1,35 +1,9 @@
# Scroll Monorepo
[![rollup](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
[![contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml)
[![bridge-history](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
[![coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
[![prover](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
[![integration](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml)
[![codecov](https://codecov.io/gh/scroll-tech/scroll/branch/develop/graph/badge.svg?token=VJVHNQWGGW)](https://codecov.io/gh/scroll-tech/scroll)
<a href="https://scroll.io">Scroll</a> is a zkRollup Layer 2 dedicated to enhancing Ethereum scalability through a bytecode-equivalent [zkEVM](https://github.com/scroll-tech/zkevm-circuits) circuit. This monorepo encompasses essential infrastructure components of the Scroll protocol. It contains the L1 and L2 contracts, the rollup node, the prover client, and the prover coordinator.
## Directory Structure
<pre>
├── <a href="./bridge-history-api/">bridge-history-api</a>: Bridge history service that collects deposit and withdraw events from both L1 and L2 chains and generates withdrawal proofs
├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./src">l2geth</a>: Scroll execution node
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./rollup">rollup</a>: Rollup-related services
├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
└── <a href="./tests">tests</a>: Integration tests
</pre>
## Contributing
We welcome community contributions to this repository. Before you submit any issues or PRs, please read the [Code of Conduct](CODE_OF_CONDUCT.md) and the [Contribution Guideline](CONTRIBUTING.md).
[![Contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [![Bridge](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [![Coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [![Database](https://github.com/scroll-tech/scroll/actions/workflows/database.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [![Common](https://github.com/scroll-tech/scroll/actions/workflows/common.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [![Roller](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
## Prerequisites
+ Go 1.20
+ Go 1.18
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Hardhat / Foundry
+ Docker
@@ -41,14 +15,14 @@ docker pull postgres
make dev_docker
```
## Testing Rollup & Coordinator
## Testing Bridge & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -81,7 +55,7 @@ This command runs a Docker container named `scroll_test_container` from the `scr
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -89,10 +63,6 @@ go test -v -race -covermode=atomic scroll-tech/common/...
## Testing Contracts
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).
You can find the unit tests in [`<REPO_DIR>/contracts/src/test/`](/contracts/src/test/), and integration tests in [`<REPO_DIR>/contracts/integration-test/`](/contracts/integration-test/).
See [`contracts`](/contracts) for more details on the contracts.
## License
Scroll Monorepo is licensed under the [MIT](./LICENSE) license.
For more details on contracts, see [`/contracts`](/contracts).

0  assets/.gitkeep Normal file
View File

View File

@@ -1,7 +1,6 @@
.PHONY: lint
REPO_ROOT_DIR=./..
IMAGE_VERSION=latest
PWD=$(shell pwd)
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
@@ -12,31 +11,16 @@ test:
bridgehistoryapi-db-cli:
go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli
bridgehistoryapi-fetcher:
go build -o $(PWD)/build/bin/bridgehistoryapi-fetcher ./cmd/fetcher
bridgehistoryapi-cross-msg-fetcher:
go build -o $(PWD)/build/bin/bridgehistoryapi-cross-msg-fetcher ./cmd/cross_msg_fetcher
bridgehistoryapi-api:
go build -o $(PWD)/build/bin/bridgehistoryapi-api ./cmd/api
bridgehistoryapi-server:
go build -o $(PWD)/build/bin/bridgehistoryapi-server ./cmd/backend_server
reset-env:
if docker ps -a -q -f name=bridgehistoryapi-redis | grep -q . ; then \
docker stop bridgehistoryapi-redis; \
docker rm bridgehistoryapi-redis; \
fi
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
if docker ps -a -q -f name=bridgehistoryapi-history-db | grep -q . ; then \
docker stop bridgehistoryapi-history-db; \
docker rm bridgehistoryapi-history-db; \
fi
db-docker:
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
until docker exec bridgehistoryapi-history-db pg_isready -h localhost -p 5432 -U postgres > /dev/null; do \
echo "Waiting for postgres to be ready..."; \
sleep 1; \
done
echo "Postgres is ready."
go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset
bridgehistoryapi-docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-cross-msg-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-cross-msg-fetcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-server:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-server.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile

View File

@@ -1,81 +1 @@
# bridge-history-api
This directory contains the `bridge-history-api` service, which provides REST APIs for querying transactions that interact with Scroll's official bridge contracts.
## Instructions
The bridge-history-api consists of three distinct components:
### bridgehistoryapi-db-cli
Provides DB init, show-version, rollback, and status-check services
```
cd ./bridge-history-api
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli [command]
```
### bridgehistoryapi-fetcher
Fetches transactions from both L1 and L2
```
cd ./bridge-history-api
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```
### bridgehistoryapi-api
Provides REST APIs. Please refer to the API details below.
```
cd ./bridge-history-api
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```
## APIs provided by bridgehistoryapi-api
1. `/api/txs`
```
// @Summary get all txs under the given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/txs [get]
```
2. `/api/l2/withdrawals`
```
// @Summary get all L2 withdrawals under given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/l2/withdrawals [get]
```
3. `/api/l2/unclaimed/withdrawals`
```
// @Summary get all L2 unclaimed withdrawals under the given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/l2/unclaimed/withdrawals [get]
```
4. `/api/txsbyhashes`
```
// @Summary get txs by given tx hashes
// @Accept plain
// @Produce plain
// @Param hashes query string array true "array of hashes"
// @Success 200
// @Router /api/txsbyhashes [post]
```
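
For illustration, querying the first endpoint against a locally running `bridgehistoryapi-api` instance could look like the following; the host, port, and address value are assumptions rather than documented defaults:

```bash
curl "http://localhost:8080/api/txs?address=0x0000000000000000000000000000000000000000&page=1&page_size=10"
```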

File diff suppressed because one or more lines are too long

View File

@@ -1,105 +0,0 @@
package app
import (
"crypto/tls"
"fmt"
"os"
"os/signal"
"time"
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/controller/api"
"scroll-tech/bridge-history-api/internal/route"
)
var app *cli.App
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "Scroll Bridge History API Web Service"
app.Usage = "The Scroll Bridge History API Web Service"
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db", "err", err)
}
defer func() {
if deferErr := database.CloseDB(db); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
opts := &redis.Options{
Addr: cfg.Redis.Address,
Username: cfg.Redis.Username,
Password: cfg.Redis.Password,
MinIdleConns: cfg.Redis.MinIdleConns,
ReadTimeout: time.Duration(cfg.Redis.ReadTimeoutMs * int(time.Millisecond)),
}
// Production Redis service has enabled transit_encryption.
if !cfg.Redis.Local {
opts.TLSConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
InsecureSkipVerify: true, //nolint:gosec
}
}
log.Info("init redis client", "addr", opts.Addr, "user name", opts.Username, "is local", cfg.Redis.Local,
"min idle connections", opts.MinIdleConns, "read timeout", opts.ReadTimeout)
redisClient := redis.NewClient(opts)
api.InitController(db, redisClient)
router := gin.Default()
registry := prometheus.DefaultRegisterer
route.Route(router, cfg, registry)
go func() {
port := ctx.Int(utils.ServicePortFlag.Name)
if runServerErr := router.Run(fmt.Sprintf(":%d", port)); runServerErr != nil {
log.Crit("run http server failure", "error", runServerErr)
}
}()
observability.Server(ctx, db)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge-history-api/cmd/api/app"
func main() {
app.Run()
}

View File

@@ -0,0 +1,95 @@
package app
import (
"fmt"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/kataras/iris/v12"
"github.com/kataras/iris/v12/mvc"
"github.com/urfave/cli/v2"
"bridge-history-api/config"
"bridge-history-api/controller"
"bridge-history-api/db"
"bridge-history-api/service"
cutils "bridge-history-api/utils"
)
var (
app *cli.App
)
var database db.OrmFactory
func pong(ctx iris.Context) {
ctx.WriteString("pong")
}
func setupQueryByAddressHandler(backend_app *mvc.Application) {
// Register Dependencies.
backend_app.Register(
database,
service.NewHistoryService,
)
// Register Controllers.
backend_app.Handle(new(controller.QueryAddressController))
}
func setupQueryByHashHandler(backend_app *mvc.Application) {
backend_app.Register(
database,
service.NewHistoryService,
)
backend_app.Handle(new(controller.QueryHashController))
}
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "Scroll Bridge History Web Service"
app.Usage = "The Scroll Bridge History Web Service"
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
database, err = db.NewOrmFactory(cfg)
if err != nil {
log.Crit("can not connect to database", "err", err)
}
defer database.Close()
bridgeApp := iris.New()
bridgeApp.Get("/ping", pong).Describe("healthcheck")
mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler)
// TODO: make debug mode configurable
err = bridgeApp.Listen(":8080", iris.WithLogLevel("debug"))
if err != nil {
log.Crit("can not start server", "err", err)
}
return nil
}
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,7 @@
package main
import "bridge-history-api/cmd/backend_server/app"
func main() {
app.Run()
}

View File

@@ -0,0 +1,117 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
"bridge-history-api/config"
"bridge-history-api/cross_msg"
"bridge-history-api/db"
cutils "bridge-history-api/utils"
)
var (
app *cli.App
)
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "Scroll Bridge History API"
app.Usage = "The Scroll Bridge Web Backend"
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
defer cancel()
l1client, err := ethclient.Dial(cfg.L1.Endpoint)
if err != nil {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
}
l2client, err := ethclient.Dial(cfg.L2.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
db, err := db.NewOrmFactory(cfg)
if err != nil {
log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
}
l1worker := &cross_msg.FetchEventWorker{F: cross_msg.L1FetchAndSaveEvents, G: cross_msg.GetLatestL1ProcessedHeight, Name: "L1 events fetch Worker"}
l2worker := &cross_msg.FetchEventWorker{F: cross_msg.L2FetchAndSaveEvents, G: cross_msg.GetLatestL2ProcessedHeight, Name: "L2 events fetch Worker"}
l1AddressList := []common.Address{
common.HexToAddress(cfg.L1.CustomERC20GatewayAddr),
common.HexToAddress(cfg.L1.ERC721GatewayAddr),
common.HexToAddress(cfg.L1.ERC1155GatewayAddr),
common.HexToAddress(cfg.L1.MessengerAddr),
common.HexToAddress(cfg.L1.ETHGatewayAddr),
common.HexToAddress(cfg.L1.StandardERC20Gateway),
common.HexToAddress(cfg.L1.WETHGatewayAddr),
}
l2AddressList := []common.Address{
common.HexToAddress(cfg.L2.CustomERC20GatewayAddr),
common.HexToAddress(cfg.L2.ERC721GatewayAddr),
common.HexToAddress(cfg.L2.ERC1155GatewayAddr),
common.HexToAddress(cfg.L2.MessengerAddr),
common.HexToAddress(cfg.L2.ETHGatewayAddr),
common.HexToAddress(cfg.L2.StandardERC20Gateway),
common.HexToAddress(cfg.L2.WETHGatewayAddr),
}
l1crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L1, db, l1client, l1worker, l1AddressList, cross_msg.L1ReorgHandling)
if err != nil {
log.Crit("failed to create l1 cross message fetcher", "error", err)
}
go l1crossMsgFetcher.Start()
defer l1crossMsgFetcher.Stop()
l2crossMsgFetcher, err := cross_msg.NewCrossMsgFetcher(subCtx, cfg.L2, db, l2client, l2worker, l2AddressList, cross_msg.L2ReorgHandling)
if err != nil {
log.Crit("failed to create l2 cross message fetcher", "error", err)
}
go l2crossMsgFetcher.Start()
defer l2crossMsgFetcher.Stop()
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,7 @@
package main
import "bridge-history-api/cmd/cross_msg_fetcher/app"
func main() {
app.Run()
}

View File

@@ -6,13 +6,17 @@ import (
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"bridge-history-api/utils"
)
var app *cli.App
var (
// Set up database app info.
app *cli.App
)
func init() {
app = cli.NewApp()
// Set up database app info.
app.Name = "db_cli"
app.Usage = "The Scroll Bridge-history-api DB CLI"
app.Flags = append(app.Flags, utils.CommonFlags...)
@@ -56,8 +60,7 @@ func init() {
Name: "version",
Usage: "Rollback to the specified version.",
Value: 0,
},
},
}},
},
}
}

View File

@@ -1,15 +1,14 @@
package app
import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/utils"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm/migrate"
"bridge-history-api/config"
"bridge-history-api/db"
"bridge-history-api/db/migrate"
"bridge-history-api/utils"
)
func getConfig(ctx *cli.Context) (*config.Config, error) {
@@ -21,8 +20,14 @@ func getConfig(ctx *cli.Context) (*config.Config, error) {
return dbCfg, nil
}
func initDB(dbCfg *database.Config) (*gorm.DB, error) {
return database.InitDB(dbCfg)
func initDB(dbCfg *config.Config) (*sqlx.DB, error) {
factory, err := db.NewOrmFactory(dbCfg)
if err != nil {
return nil, err
}
log.Debug("Got db config from env", "driver name", dbCfg.DB.DriverName, "dsn", dbCfg.DB.DSN)
return factory.GetDB(), nil
}
// resetDB clean or reset database.
@@ -31,15 +36,11 @@ func resetDB(ctx *cli.Context) error {
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
db, err := initDB(cfg)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
err = migrate.ResetDB(db)
err = migrate.ResetDB(db.DB)
if err != nil {
return err
}
@@ -53,15 +54,12 @@ func checkDBStatus(ctx *cli.Context) error {
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
db, err := initDB(cfg)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
return migrate.Status(db)
return migrate.Status(db.DB)
}
// dbVersion return the latest version
@@ -70,15 +68,12 @@ func dbVersion(ctx *cli.Context) error {
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
db, err := initDB(cfg)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
version, err := migrate.Current(db)
version, err := migrate.Current(db.DB)
log.Info("show database version", "db version", version)
return err
@@ -90,15 +85,12 @@ func migrateDB(ctx *cli.Context) error {
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
db, err := initDB(cfg)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
return migrate.Migrate(db)
return migrate.Migrate(db.DB)
}
// rollbackDB rollback db by version
@@ -107,14 +99,10 @@ func rollbackDB(ctx *cli.Context) error {
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
db, err := initDB(cfg)
if err != nil {
return err
}
version := ctx.Int64("version")
return migrate.Rollback(db, &version)
return migrate.Rollback(db.DB, &version)
}

View File

@@ -1,6 +1,6 @@
package main
import "scroll-tech/bridge-history-api/cmd/db_cli/app"
import "bridge-history-api/cmd/db_cli/app"
func main() {
app.Run()

View File

@@ -1,93 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/controller/fetcher"
)
var app *cli.App
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "Scroll Bridge History API Message Fetcher"
app.Usage = "The Scroll Bridge History API Message Fetcher"
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
defer cancel()
l1Client, err := ethclient.Dial(cfg.L1.Endpoint)
if err != nil {
log.Crit("failed to connect to L1 geth", "endpoint", cfg.L1.Endpoint, "err", err)
}
l2Client, err := ethclient.Dial(cfg.L2.Endpoint)
if err != nil {
log.Crit("failed to connect to L2 geth", "endpoint", cfg.L2.Endpoint, "err", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db", "err", err)
}
defer func() {
if deferErr := database.CloseDB(db); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
if err != nil {
log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
}
observability.Server(ctx, db)
l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
go l1MessageFetcher.Start()
l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)
go l2MessageFetcher.Start()
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge-history-api/cmd/fetcher/app"
func main() {
app.Run()
}

View File

@@ -1,53 +0,0 @@
{
"L1": {
"confirmation": 0,
"endpoint": "https://rpc.ankr.com/eth",
"startHeight": 18306000,
"blockTime": 10,
"fetchLimit": 30,
"MessengerAddr": "0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367",
"ETHGatewayAddr": "0x7F2b8C31F88B6006c382775eea88297Ec1e3E905",
"WETHGatewayAddr": "0x7AC440cAe8EB6328de4fA621163a792c1EA9D4fE",
"StandardERC20GatewayAddr": "0xD8A791fE2bE73eb6E6cF1eb0cb3F36adC9B3F8f9",
"CustomERC20GatewayAddr": "0xb2b10a289A229415a124EFDeF310C10cb004B6ff",
"ERC721GatewayAddr": "0x6260aF48e8948617b8FA17F4e5CEa2d21D21554B",
"ERC1155GatewayAddr": "0xb94f7F6ABcb811c5Ac709dE14E37590fcCd975B6",
"USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
"LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
},
"L2": {
"confirmation": 0,
"endpoint": "https://rpc.scroll.io",
"blockTime": 3,
"fetchLimit": 100,
"MessengerAddr": "0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC",
"ETHGatewayAddr": "0x6EA73e05AdC79974B931123675ea8F78FfdacDF0",
"WETHGatewayAddr": "0x7003E7B7186f0E6601203b99F7B8DECBfA391cf9",
"StandardERC20GatewayAddr": "0xE2b4795039517653c5Ae8C2A9BFdd783b48f447A",
"CustomERC20GatewayAddr": "0x64CCBE37c9A82D85A1F2E74649b7A42923067988",
"ERC721GatewayAddr": "0x7bC08E1c04fb41d75F1410363F0c5746Eae80582",
"ERC1155GatewayAddr": "0x62597Cc19703aF10B58feF87B0d5D29eFE263bcc",
"USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
"LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
"DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",
"driverName": "postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"redis": {
"address": "localhost:6379",
"username": "default",
"password": "",
"local": true,
"minIdleConns": 10,
"readTimeoutMs": 500
}
}

View File

@@ -0,0 +1,34 @@
{
"l1": {
"confirmation": 64,
"endpoint": "https://rpc.ankr.com/eth_goerli",
"startHeight": 8890194 ,
"blockTime": 10,
"MessengerAddr": "0x5260e38080BFe97e6C4925d9209eCc5f964373b6",
"ETHGatewayAddr": "0x429b73A21cF3BF1f3E696a21A95408161daF311f",
"WETHGatewayAddr": "0x8be69E499D8848DfFb4cF9bac909f3e2cF2FeFa0",
"StandardERC20Gateway": "0xeF37207c1A1efF6D6a9d7BfF3cF4270e406d319b",
"CustomERC20GatewayAddr": "0x920f906B814597cF5DC76F95100F09CBAF9c5748",
"ERC721GatewayAddr": "0x1C441Dfc5C2eD7A2AA8636748A664E59CB029157",
"ERC1155GatewayAddr": "0xd1bE599aaCBC21448fD6373bbc7c1b4c7806f135"
},
"l2": {
"confirmation": 1,
"endpoint": "https://alpha-rpc.scroll.io/l2",
"blockTime": 3,
"startHeight": 1600068,
"CustomERC20GatewayAddr": "0xa07Cb742657294C339fB4d5d6CdF3fdBeE8C1c68",
"ERC721GatewayAddr": "0x8Fee20e0C0Ef16f2898a8073531a857D11b9C700",
"StandardERC20Gateway": "0xB878F37BB278bf0e4974856fFe86f5e6F66BD725",
"MessengerAddr": "0xb75d7e84517e1504C151B270255B087Fd746D34C",
"ETHGatewayAddr": "0x32139B5C8838E94fFcD83E60dff95Daa7F0bA14c",
"WETHGatewayAddr": "0xBb88bF582F2BBa46702621dae5CB9271057bC85b",
"ERC1155GatewayAddr": "0x2946cB860028276b3C4bccE1767841641C2E0828"
},
"db": {
"dsn": "postgres://postgres:1234@localhost:5444/test?sslmode=disable",
"driverName": "postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
}
}

View File

@@ -0,0 +1,57 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
)
// DBConfig db config
type DBConfig struct {
// data source name
DSN string `json:"dsn"`
DriverName string `json:"driverName"`
MaxOpenNum int `json:"maxOpenNum"`
MaxIdleNum int `json:"maxIdleNum"`
}
type LayerConfig struct {
Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"`
StartHeight uint64 `json:"startHeight"`
BlockTime int64 `json:"blockTime"`
MessengerAddr string `json:"MessengerAddr"`
ETHGatewayAddr string `json:"ETHGatewayAddr"`
WETHGatewayAddr string `json:"WETHGatewayAddr"`
StandardERC20Gateway string `json:"StandardERC20Gateway"`
ERC721GatewayAddr string `json:"ERC721GatewayAddr"`
ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
}
// Config is the configuration of the bridge history backend
type Config struct {
// chain config
L1 *LayerConfig `json:"l1"`
L2 *LayerConfig `json:"l2"`
// data source name
DB *DBConfig `json:"db"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &Config{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
return cfg, nil
}

View File

@@ -0,0 +1,37 @@
package controller
import (
"bridge-history-api/model"
"bridge-history-api/service"
"github.com/ethereum/go-ethereum/common"
)
type QueryAddressController struct {
Service service.HistoryService
}
type QueryHashController struct {
Service service.HistoryService
}
func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
message, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
if err != nil {
return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByAddressResponse{Message: "ok",
Data: &model.Data{
Result: message,
Total: len(message),
}}, nil
}
func (c *QueryHashController) Post(req model.QueryByHashRequest) (*model.QueryByHashResponse, error) {
result, err := c.Service.GetTxsByHashes(req.Txs)
if err != nil {
return &model.QueryByHashResponse{Message: "500", Data: &model.Data{}}, err
}
return &model.QueryByHashResponse{Message: "ok", Data: &model.Data{Result: result}}, nil
}

View File

@@ -0,0 +1,213 @@
package cross_msg
import (
"context"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/modern-go/reflect2"
"bridge-history-api/config"
"bridge-history-api/db"
"bridge-history-api/utils"
)
type CrossMsgFetcher struct {
ctx context.Context
config *config.LayerConfig
db db.OrmFactory
client *ethclient.Client
worker *FetchEventWorker
reorgHandling ReorgHandling
addressList []common.Address
cachedHeaders []*types.Header
mu *sync.Mutex
reorgStartCh chan struct{}
reorgEndCh chan struct{}
}
func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*CrossMsgFetcher, error) {
newMU := &sync.Mutex{}
crossMsgFetcher := &CrossMsgFetcher{
ctx: ctx,
config: config,
db: db,
client: client,
worker: worker,
reorgHandling: reorg,
mu: newMU,
addressList: addressList,
cachedHeaders: make([]*types.Header, 0),
reorgStartCh: make(chan struct{}),
reorgEndCh: make(chan struct{}),
}
return crossMsgFetcher, nil
}
func (c *CrossMsgFetcher) Start() {
log.Info("CrossMsgFetcher Start")
// fetch missing events from finalized blocks; reorgs are not handled here
c.forwardFetchAndSaveMissingEvents(c.config.Confirmation)
tick := time.NewTicker(time.Duration(c.config.BlockTime) * time.Second)
headerTick := time.NewTicker(time.Duration(c.config.BlockTime/2) * time.Second)
go func() {
for {
select {
case <-c.reorgStartCh:
// create timeout here
timeout := time.NewTicker(300 * time.Second)
select {
case <-c.reorgEndCh:
log.Info("Reorg finished")
timeout.Stop()
case <-timeout.C:
// TODO: need to notify the on-call members to handle reorg manually
timeout.Stop()
log.Crit("Reorg timeout")
}
case <-c.ctx.Done():
tick.Stop()
return
case <-tick.C:
c.mu.Lock()
c.forwardFetchAndSaveMissingEvents(0)
c.mu.Unlock()
}
}
}()
go func() {
for {
select {
case <-c.ctx.Done():
headerTick.Stop()
return
case <-headerTick.C:
c.fetchMissingLatestHeaders()
}
}
}()
}
func (c *CrossMsgFetcher) Stop() {
c.db.Close()
log.Info("CrossMsgFetcher Stop")
}
// forwardFetchAndSaveMissingEvents will fetch all events from the latest processed height to the latest block number.
func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
// when fetching up to the latest block, do not go beyond the cached headers
var number uint64
var err error
if len(c.cachedHeaders) != 0 && confirmation <= 0 {
number = c.cachedHeaders[len(c.cachedHeaders)-1].Number.Uint64() - 1
} else {
number, err = utils.GetSafeBlockNumber(c.ctx, c.client, confirmation)
if err != nil {
log.Error(fmt.Sprintf("%s: can not get the safe block number", c.worker.Name), "err", err)
return
}
}
if reflect2.IsNil(c.worker.G) || reflect2.IsNil(c.worker.F) {
log.Error(fmt.Sprintf("%s: invalid get/fetch function", c.worker.Name))
return
}
processed_height, err := c.worker.G(c.db)
if err != nil {
log.Error(fmt.Sprintf("%s: can not get latest processed block height", c.worker.Name))
}
log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processed_height)
if processed_height <= 0 || processed_height < int64(c.config.StartHeight) {
processed_height = int64(c.config.StartHeight)
} else {
processed_height += 1
}
for n := processed_height; n <= int64(number); n += FETCH_LIMIT {
iter_end := n + FETCH_LIMIT - 1
if iter_end > int64(number) {
iter_end = int64(number)
}
err := c.worker.F(c.ctx, c.client, c.db, n, iter_end, c.addressList)
if err != nil {
log.Error(fmt.Sprintf("%s: failed!", c.worker.Name), "err", err)
break
}
}
}
func (c *CrossMsgFetcher) fetchMissingLatestHeaders() {
var start int64
number, err := c.client.BlockNumber(c.ctx)
if err != nil {
log.Error("fetchMissingLatestHeaders(): can not get the latest block number", "err", err)
return
}
if len(c.cachedHeaders) > 0 {
start = c.cachedHeaders[len(c.cachedHeaders)-1].Number.Int64() + 1
} else {
start = int64(number - c.config.Confirmation)
}
for i := start; i <= int64(number); i++ {
select {
case <-c.ctx.Done():
close(c.reorgStartCh)
close(c.reorgEndCh)
return
default:
header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(i)))
if err != nil {
log.Error("failed to get latest header", "err", err)
return
}
if len(c.cachedHeaders) == 0 {
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, []*types.Header{header}, int(c.config.Confirmation))
return
}
// check if the fetched header is a child of the last cached header
if IsParentAndChild(c.cachedHeaders[len(c.cachedHeaders)-1], header) {
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, []*types.Header{header}, int(c.config.Confirmation))
log.Debug("fetched block into cache", "height", header.Number, "parent hash", header.ParentHash.Hex(), "block hash", c.cachedHeaders[len(c.cachedHeaders)-1].Hash().Hex(), "len", len(c.cachedHeaders))
continue
}
// reorg happened
log.Warn("Reorg happened", "height", header.Number, "parent hash", header.ParentHash.Hex(), "last cached hash", c.cachedHeaders[len(c.cachedHeaders)-1].Hash().Hex(), "last cached height", c.cachedHeaders[len(c.cachedHeaders)-1].Number)
c.reorgStartCh <- struct{}{}
// wait here if a fetcher is currently running
c.mu.Lock()
index, ok, validHeaders := BackwardFindReorgBlock(c.ctx, c.cachedHeaders, c.client, header)
if !ok {
log.Error("Reorg happended too earlier than cached headers", "reorg height", header.Number)
num, err := utils.GetSafeBlockNumber(c.ctx, c.client, c.config.Confirmation)
if err != nil {
log.Crit("Can not get safe number during reorg, quit the process", "err", err)
}
// clear all our saved data, because no data is safe now
err = c.reorgHandling(c.ctx, int64(num), c.db)
// if handling success then we can update the cachedHeaders
if err == nil {
c.cachedHeaders = c.cachedHeaders[:0]
}
c.mu.Unlock()
c.reorgEndCh <- struct{}{}
return
}
err = c.reorgHandling(c.ctx, c.cachedHeaders[index].Number.Int64(), c.db)
// if handling success then we can update the cachedHeaders
if err == nil {
c.cachedHeaders = c.cachedHeaders[:index+1]
c.cachedHeaders = MergeAddIntoHeaderList(c.cachedHeaders, validHeaders, int(c.config.Confirmation))
}
c.mu.Unlock()
c.reorgEndCh <- struct{}{}
}
}
}

View File

@@ -0,0 +1,429 @@
package cross_msg
import (
"context"
"math/big"
geth "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
backendabi "bridge-history-api/abi"
"bridge-history-api/db"
"bridge-history-api/db/orm"
"bridge-history-api/utils"
)
// TODO: read from config
var (
// the number of blocks fetched per round
FETCH_LIMIT = int64(3000)
)
// FetchAndSave is a function type that fetches events from the blockchain and saves them to the database
type FetchAndSave func(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addressList []common.Address) error
// GetLatestProcessed is a function type that gets the latest processed block height from the database
type GetLatestProcessed func(db db.OrmFactory) (int64, error)
type UpdateXHash func(ctx context.Context)
type FetchEventWorker struct {
F FetchAndSave
G GetLatestProcessed
Name string
}
type msgHashWrapper struct {
msgHash common.Hash
txHash common.Hash
}
func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL1ProcessedHeight()
if err != nil {
log.Error("failed to get L1 cross message processed height: ", "err", err)
return 0, err
}
relayedHeight, err := db.GetLatestRelayedHeightOnL1()
if err != nil {
log.Error("failed to get L1 relayed message processed height: ", "err", err)
return 0, err
}
if crossHeight > relayedHeight {
return crossHeight, nil
} else {
return relayedHeight, nil
}
}
func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
crossHeight, err := db.GetLatestL2ProcessedHeight()
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, err
}
relayedHeight, err := db.GetLatestRelayedHeightOnL2()
if err != nil {
log.Error("failed to get L2 relayed message processed height", "err", err)
return 0, err
}
if crossHeight > relayedHeight {
return crossHeight, nil
} else {
return relayedHeight, nil
}
}
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: addrList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1RelayedMessageEventSignature
query.Topics[0][3] = backendabi.L1SentMessageEventSignature
query.Topics[0][4] = backendabi.L1DepositERC721Sig
query.Topics[0][5] = backendabi.L1DepositERC1155Sig
query.Topics[0][6] = backendabi.L1DepositWETHSig
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get l1 event logs", "err", err)
return err
}
depositL1CrossMsgs, msgHashes, relayedMsg, err := parseBackendL1EventLogs(logs)
if err != nil {
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
return err
}
err = database.BatchInsertL1CrossMsgDBTx(dbTx, depositL1CrossMsgs)
if err != nil {
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
}
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
if err != nil {
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
err = updateL1CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
if err != nil {
dbTx.Rollback()
log.Crit("l1FetchAndSaveEvents: Failed to update msgHash in L1 cross msg", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we cannot insert into DB, something must be wrong; an on-call member needs to handle the database manually
dbTx.Rollback()
log.Error("l1FetchAndSaveEvents: Failed to commit db transaction", "err", err)
return err
}
return nil
}
func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: addrList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2RelayedMessageEventSignature
query.Topics[0][3] = backendabi.L2SentMessageEventSignature
query.Topics[0][4] = backendabi.L2WithdrawERC721Sig
query.Topics[0][5] = backendabi.L2WithdrawERC1155Sig
query.Topics[0][6] = backendabi.L2WithdrawWETHSig
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get l2 event logs", "err", err)
return err
}
depositL2CrossMsgs, msgHashes, relayedMsg, err := parseBackendL2EventLogs(logs)
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
return err
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("l2FetchAndSaveEvents: Failed to begin db transaction", "err", err)
return err
}
err = database.BatchInsertL2CrossMsgDBTx(dbTx, depositL2CrossMsgs)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert cross msg event logs", "err", err)
}
err = database.BatchInsertRelayedMsgDBTx(dbTx, relayedMsg)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
}
err = updateL2CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
if err != nil {
dbTx.Rollback()
log.Crit("l2FetchAndSaveEvents: Failed to update msgHash in L2 cross msg", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we cannot insert into DB, something must be wrong; an on-call member needs to handle the database manually
dbTx.Rollback()
log.Error("l2FetchAndSaveEvents: Failed to commit db transaction", "err", err)
return err
}
return nil
}
func parseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []msgHashWrapper, []*orm.RelayedMsg, error) {
// Need to use the contract ABI to parse event logs
// Can only be tested after we have our contracts set up
var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHashes []msgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
event := backendabi.DepositETH{}
err := utils.UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(),
})
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
})
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
})
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
Amount: event.Amount.String(),
})
case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{}
err := utils.UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
msgHash := utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, msgHashWrapper{
msgHash: msgHash,
txHash: vlog.TxHash})
case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer1Hash: vlog.TxHash.Hex(),
})
}
}
return l1CrossMsg, msgHashes, relayedMsgs, nil
}
func parseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []msgHashWrapper, []*orm.RelayedMsg, error) {
// Need to use the contract ABI to parse event logs
// Can only be tested after we have our contracts set up
var l2CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHashes []msgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
event := backendabi.DepositETH{}
err := utils.UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(),
})
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
})
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
})
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
Amount: event.Amount.String(),
})
case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{}
err := utils.UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
msgHash := utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, msgHashWrapper{
msgHash: msgHash,
txHash: vlog.TxHash})
case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer2Hash: vlog.TxHash.Hex(),
})
}
}
return l2CrossMsg, msgHashes, relayedMsgs, nil
}
func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []msgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.txHash, msgHash.msgHash)
if err != nil {
log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.txHash, "err", err)
continue
}
}
return nil
}
func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []msgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.txHash, msgHash.msgHash)
if err != nil {
log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.txHash, "err", err)
continue
}
}
return nil
}

View File

@@ -0,0 +1,64 @@
package cross_msg_test
import (
"bridge-history-api/cross_msg"
"crypto/rand"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func TestMergeIntoList(t *testing.T) {
headers, err := generateHeaders(64)
assert.NoError(t, err)
assert.Equal(t, headers[0].Hash(), headers[1].ParentHash)
headers2, err := generateHeaders(18)
assert.NoError(t, err)
result := cross_msg.MergeAddIntoHeaderList(headers, headers2, 64)
assert.Equal(t, 64, len(result))
assert.Equal(t, headers2[len(headers2)-1], result[len(result)-1])
assert.NotEqual(t, headers[0], result[0])
}
func generateHeaders(amount int) ([]*types.Header, error) {
headers := make([]*types.Header, amount)
for i := 0; i < amount; i++ {
var parentHash common.Hash
if i > 0 {
parentHash = headers[i-1].Hash()
}
nonce, err := rand.Int(rand.Reader, big.NewInt(1<<63-1))
if err != nil {
return nil, err
}
difficulty := big.NewInt(131072)
header := &types.Header{
ParentHash: parentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: common.Address{},
Root: common.Hash{},
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
Bloom: types.Bloom{},
Difficulty: difficulty,
Number: big.NewInt(int64(i)),
GasLimit: 5000000,
GasUsed: 0,
Time: uint64(i * 15),
Extra: []byte{},
MixDigest: common.Hash{},
Nonce: types.EncodeNonce(uint64(nonce.Uint64())),
}
headers[i] = header
}
return headers, nil
}
// TODO: add more test cases
// func TestReorg(t *testing.T)
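As a rough sketch of one additional case (not part of the original file), the following assumes the same package and imports as the test above and reuses its generateHeaders helper; it only exercises the pure helper IsParentAndChild, since a full TestReorg would need a mocked ethclient for BackwardFindReorgBlock.
```go
// Sketch only: reuses generateHeaders and the imports from the test file above.
func TestIsParentAndChild(t *testing.T) {
	headers, err := generateHeaders(2)
	assert.NoError(t, err)

	// consecutive generated headers are linked via ParentHash
	assert.True(t, cross_msg.IsParentAndChild(headers[0], headers[1]))

	// a header that does not reference headers[0] as its parent is not its child
	orphan := &types.Header{Number: big.NewInt(1)}
	assert.False(t, cross_msg.IsParentAndChild(headers[0], orphan))
}
```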

View File

@@ -0,0 +1,104 @@
package cross_msg
import (
"context"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"bridge-history-api/db"
)
type ReorgHandling func(ctx context.Context, reorgHeight int64, db db.OrmFactory) error
func reverseArray(arr []*types.Header) []*types.Header {
for i := 0; i < len(arr)/2; i++ {
j := len(arr) - i - 1
arr[i], arr[j] = arr[j], arr[i]
}
return arr
}
func IsParentAndChild(parentHeader *types.Header, header *types.Header) bool {
return header.ParentHash == parentHeader.Hash()
}
func MergeAddIntoHeaderList(baseArr, extraArr []*types.Header, maxLength int) []*types.Header {
mergedArr := append(baseArr, extraArr...)
if len(mergedArr) <= maxLength {
return mergedArr
}
startIndex := len(mergedArr) - maxLength
return mergedArr[startIndex:]
}
func BackwardFindReorgBlock(ctx context.Context, headers []*types.Header, client *ethclient.Client, header *types.Header) (int, bool, []*types.Header) {
maxStep := len(headers)
backwardHeaderList := []*types.Header{header}
for iterRound := 0; iterRound < maxStep; iterRound++ {
header, err := client.HeaderByHash(ctx, header.ParentHash)
if err != nil {
log.Error("BackwardFindReorgBlock failed", "error", err)
return -1, false, nil
}
backwardHeaderList = append(backwardHeaderList, header)
for j := len(headers) - 1; j >= 0; j-- {
if IsParentAndChild(headers[j], header) {
backwardHeaderList = reverseArray(backwardHeaderList)
return j, true, backwardHeaderList
}
}
}
return -1, false, nil
}
func L1ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
dbTx, err := db.Beginx()
if err != nil {
log.Crit("begin db tx failed", "err", err)
}
err = db.DeleteL1CrossMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l1 cross msg from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL1RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l1 relayed hash from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
dbTx.Rollback()
log.Error("commit tx failed", "err", err)
return err
}
return nil
}
func L2ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) error {
dbTx, err := db.Beginx()
if err != nil {
log.Crit("begin db tx failed", "err", err)
}
err = db.DeleteL2CrossMsgFromHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l2 cross msg from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2RelayedHashAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l2 relayed hash from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
dbTx.Rollback()
log.Error("commit tx failed", "err", err)
return err
}
return nil
}

View File

@@ -18,7 +18,7 @@ const MigrationsDir string = "migrations"
func init() {
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("bridge_historyv2_migrations")
goose.SetTableName("bridge_history_migrations")
verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
goose.SetVerbose(verbose)

View File

@@ -0,0 +1,72 @@
-- +goose Up
-- +goose StatementBegin
create table cross_message
(
id BIGSERIAL PRIMARY KEY,
msg_hash VARCHAR NOT NULL DEFAULT '',
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
amount VARCHAR NOT NULL,
layer1_hash VARCHAR NOT NULL DEFAULT '',
layer2_hash VARCHAR NOT NULL DEFAULT '',
layer1_token VARCHAR NOT NULL DEFAULT '',
layer2_token VARCHAR NOT NULL DEFAULT '',
token_id BIGINT NOT NULL DEFAULT 0,
asset SMALLINT NOT NULL,
msg_type SMALLINT NOT NULL,
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
comment
on column cross_message.asset is 'ETH, ERC20, ERC721, ERC1155';
comment
on column cross_message.msg_type is 'unknown, l1msg, l2msg';
comment
on column cross_message.is_deleted is 'NotDeleted false, Deleted true';
CREATE INDEX valid_l1_msg_index ON cross_message (layer1_hash, is_deleted);
CREATE INDEX valid_l2_msg_index ON cross_message (layer2_hash, is_deleted);
CREATE INDEX valid_height_index ON cross_message (height, msg_type, is_deleted);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON cross_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
CREATE OR REPLACE FUNCTION delete_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE cross_message SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER delete_at_trigger
AFTER UPDATE ON cross_message
FOR EACH ROW
EXECUTE FUNCTION delete_at_trigger();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists cross_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,55 @@
-- +goose Up
-- +goose StatementBegin
create table relayed_msg
(
id BIGSERIAL PRIMARY KEY,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
layer1_hash VARCHAR NOT NULL DEFAULT '',
layer2_hash VARCHAR NOT NULL DEFAULT '',
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
comment
on column relayed_msg.is_deleted is 'NotDeleted, Deleted';
create unique index relayed_msg_hash_uindex
on relayed_msg (msg_hash);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
CREATE OR REPLACE FUNCTION delete_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE relayed_msg SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER delete_at_trigger
AFTER UPDATE ON relayed_msg
FOR EACH ROW
EXECUTE FUNCTION delete_at_trigger();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists relayed_msg;
-- +goose StatementEnd

View File

@@ -0,0 +1,100 @@
package orm
import (
"context"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/jmoiron/sqlx"
)
type AssetType int
type MsgType int
func (a AssetType) String() string {
switch a {
case ETH:
return "ETH"
case ERC20:
return "ERC20"
case ERC1155:
return "ERC1155"
case ERC721:
return "ERC721"
}
return "Unknown Asset Type"
}
const (
ETH AssetType = iota
ERC20
ERC721
ERC1155
)
const (
UnknownMsg MsgType = iota
Layer1Msg
Layer2Msg
)
// CrossMsg represents a cross message from layer 1 to layer 2
type CrossMsg struct {
ID uint64 `json:"id" db:"id"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Amount string `json:"amount" db:"amount"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
Layer1Token string `json:"layer1_token" db:"layer1_token"`
Layer2Token string `json:"layer2_token" db:"layer2_token"`
TokenID uint64 `json:"token_id" db:"token_id"`
Asset int `json:"asset" db:"asset"`
MsgType int `json:"msg_type" db:"msg_type"`
IsDeleted bool `json:"is_deleted" db:"is_deleted"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}
type RelayedMsg struct {
MsgHash string `json:"msg_hash" db:"msg_hash"`
Height uint64 `json:"height" db:"height"`
Layer1Hash string `json:"layer1_hash" db:"layer1_hash"`
Layer2Hash string `json:"layer2_hash" db:"layer2_hash"`
}
// L1CrossMsgOrm provides operations on layer1 messages in the cross_message table
type L1CrossMsgOrm interface {
GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error)
GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error)
BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
// UpdateL1CrossMsgHash invoked when SentMessage event is received
UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error
UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error
GetLatestL1ProcessedHeight() (int64, error)
DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}
// L2CrossMsgOrm provides operations on layer2 messages in the cross_message table
type L2CrossMsgOrm interface {
GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error)
GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error)
BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
// UpdateL2CrossMsgHash invoked when SentMessage event is received
UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error
UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error
GetLatestL2ProcessedHeight() (int64, error)
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
}
type RelayedMsgOrm interface {
BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error
GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error)
GetLatestRelayedHeightOnL1() (int64, error)
GetLatestRelayedHeightOnL2() (int64, error)
DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}

View File

@@ -0,0 +1,120 @@
package orm
import (
"context"
"database/sql"
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type l1CrossMsgOrm struct {
db *sqlx.DB
}
// NewL1CrossMsgOrm creates a new L1CrossMsgOrm instance
func NewL1CrossMsgOrm(db *sqlx.DB) L1CrossMsgOrm {
return &l1CrossMsgOrm{db: db}
}
func (l *l1CrossMsgOrm) GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer1_hash = $1 AND msg_type = $2 AND NOT is_deleted;`, l1Hash.String(), Layer1Msg)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
}
return nil, err
}
return result, nil
}
// GetL1CrossMsgsByAddress returns all layer1 cross messages under the given address
// Warning: returns an empty slice if no data is found
func (l *l1CrossMsgOrm) GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND NOT is_deleted;`, sender.String(), Layer1Msg)
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
if len(results) == 0 && errors.Is(err, sql.ErrNoRows) {
} else if err != nil {
return nil, err
}
return results, nil
}
func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"amount": msg.Amount,
"asset": msg.Asset,
"layer1_hash": msg.Layer1Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
"token_id": msg.TokenID,
"msg_type": Layer1Msg,
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_id, amount, msg_type) select :height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type WHERE NOT EXISTS (SELECT 1 FROM cross_message WHERE layer1_hash = :layer1_hash AND NOT is_deleted);`, messageMaps[i])
if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "l1hashes", msg.Layer1Hash, "heights", msg.Height, "err", err)
break
}
}
return err
}
// UpdateL1CrossMsgHashDBTx updates the l1 cross msg hash in db; no need to check msg_type since layer1_hash won't be empty if it's a layer1 msg
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
return err
}
return nil
}
func (l *l1CrossMsgOrm) UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.l1_cross_message set msg_hash = ? where layer1_hash = ? AND NOT is_deleted;"), msgHash.String(), l1Hash.String()); err != nil {
return err
}
return nil
}
func (l *l1CrossMsgOrm) GetLatestL1ProcessedHeight() (int64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer1Msg)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
if _, err := dbTx.Exec(`UPDATE cross_message SET is_deleted = true WHERE height > $1 AND msg_type = $2;`, height, Layer1Msg); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,123 @@
package orm
import (
"context"
"database/sql"
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type l2CrossMsgOrm struct {
db *sqlx.DB
}
// NewL2CrossMsgOrm creates a new L2CrossMsgOrm instance
func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {
return &l2CrossMsgOrm{db: db}
}
func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
}
return nil, err
}
return result, nil
}
// GetL2CrossMsgByAddress returns all layer2 cross messages under the given address
// Warning: returns an empty slice if no data is found
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
rows, err := l.db.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND msg_type = $2 AND NOT is_deleted;`, sender.String(), Layer2Msg)
for rows.Next() {
msg := &CrossMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
if len(results) == 0 && errors.Is(err, sql.ErrNoRows) {
// log.Warn("no unprocessed layer1 messages in db", "err", err)
} else if err != nil {
return nil, err
}
return results, nil
}
func (l *l2CrossMsgOrm) DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE cross_message SET is_deleted = true where height > $1 AND msg_type = $2 ;`, height, Layer2Msg)
if err != nil {
log.Error("DeleteL1CrossMsgAfterHeightDBTx: failed to delete", "height", height, "err", err)
return err
}
return nil
}
func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"height": msg.Height,
"sender": msg.Sender,
"target": msg.Target,
"asset": msg.Asset,
"layer2_hash": msg.Layer2Hash,
"layer1_token": msg.Layer1Token,
"layer2_token": msg.Layer2Token,
"token_id": msg.TokenID,
"amount": msg.Amount,
"msg_type": Layer2Msg,
}
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_id, amount, msg_type) select :height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type WHERE NOT EXISTS (SELECT 1 FROM cross_message WHERE layer2_hash = :layer2_hash AND NOT is_deleted);`, messageMaps[i])
if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "layer2hash", msg.Layer2Hash, "heights", msg.Height, "err", err)
break
}
}
return err
}
func (l *l2CrossMsgOrm) UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
return err
}
return nil
}
func (l *l2CrossMsgOrm) UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error {
if _, err := l.db.ExecContext(ctx, l.db.Rebind("update public.cross_message set msg_hash = ? where layer2_hash = ? AND NOT is_deleted;"), msgHash.String(), l2Hash.String()); err != nil {
return err
}
return nil
}
func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE msg_type = $1 AND NOT is_deleted ORDER BY id DESC LIMIT 1;`, Layer2Msg)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
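Taken together, these accessors form the read surface an indexer or API layer would call against the `cross_message` table. Below is a minimal usage sketch, not part of this diff: the DSN, the hash literal, and the `main` wiring are placeholders; only the `orm` package path and method signatures come from the files above.

```go
// Minimal sketch, assuming a migrated Postgres database; DSN and hash are placeholders.
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"

	"bridge-history-api/db/orm"
)

func main() {
	db, err := sqlx.Open("postgres", "postgres://user:pass@localhost:5432/bridge?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	l2Orm := orm.NewL2CrossMsgOrm(db)

	// Look up a single L2->L1 message by its layer2 transaction hash.
	msg, err := l2Orm.GetL2CrossMsgByHash(common.HexToHash("0x1234"))
	if err != nil {
		log.Fatal(err)
	}
	if msg == nil {
		log.Println("no cross message found for that hash")
	}

	// Resume indexing from the last processed L2 height (-1 means the table is empty).
	height, err := l2Orm.GetLatestL2ProcessedHeight()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("latest processed L2 height:", height)
}
```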

View File

@@ -0,0 +1,94 @@
package orm
import (
"database/sql"
"errors"
"strings"
"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)
type relayedMsgOrm struct {
db *sqlx.DB
}
// NewRelayedMsgOrm creates a new RelayedMsgOrm instance
func NewRelayedMsgOrm(db *sqlx.DB) RelayedMsgOrm {
return &relayedMsgOrm{db: db}
}
func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*RelayedMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"msg_hash": msg.MsgHash,
"height": msg.Height,
"layer1_hash": msg.Layer1Hash,
"layer2_hash": msg.Layer2Hash,
}
_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps[i])
if err != nil && !strings.Contains(err.Error(), "pq: duplicate key value violates unique constraint \"relayed_msg_hash_uindex") {
log.Error("BatchInsertRelayedMsgDBTx: failed to insert l1 cross msgs", "msg_Hashe", msg.MsgHash, "height", msg.Height, "err", err)
break
}
}
return err
}
func (l *relayedMsgOrm) GetRelayedMsgByHash(msgHash string) (*RelayedMsg, error) {
result := &RelayedMsg{}
row := l.db.QueryRowx(`SELECT msg_hash, height, layer1_hash, layer2_hash FROM relayed_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msgHash)
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
}
return nil, err
}
return result, nil
}
func (l *relayedMsgOrm) GetLatestRelayedHeightOnL1() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer1_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *relayedMsgOrm) GetLatestRelayedHeightOnL2() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM relayed_msg WHERE layer2_hash != '' AND NOT is_deleted ORDER BY height DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}
func (l *relayedMsgOrm) DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer1_hash != '';`, height)
return err
}
func (l *relayedMsgOrm) DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE relayed_msg SET is_deleted = true WHERE height > $1 AND layer2_hash != '';`, height)
return err
}
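All of the relayed-message writes are `*DBTx` variants, so commit and rollback stay with the caller. A hedged sketch of that pattern follows; the `RelayedMsg` field types (string hashes) are inferred from the queries above, and the DSN and hash values are placeholders.

```go
// Sketch of the caller-managed transaction pattern around BatchInsertRelayedMsgDBTx.
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"

	"bridge-history-api/db/orm"
)

func insertRelayed(db *sqlx.DB, msgs []*orm.RelayedMsg) error {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	relayedOrm := orm.NewRelayedMsgOrm(db)
	// BatchInsertRelayedMsgDBTx only executes statements on tx; the caller decides
	// whether the batch commits or rolls back.
	if err := relayedOrm.BatchInsertRelayedMsgDBTx(tx, msgs); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}

func main() {
	db, err := sqlx.Open("postgres", "postgres://user:pass@localhost:5432/bridge?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	msgs := []*orm.RelayedMsg{{MsgHash: "0xabc", Layer1Hash: "0xdef"}}
	if err := insertRelayed(db, msgs); err != nil {
		log.Fatal(err)
	}
}
```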

View File

@@ -0,0 +1,81 @@
package db
import (
"database/sql"
"errors"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"bridge-history-api/config"
"bridge-history-api/db/orm"
)
// OrmFactory includes all ORM interfaces
type OrmFactory interface {
orm.L1CrossMsgOrm
orm.L2CrossMsgOrm
orm.RelayedMsgOrm
GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error)
GetDB() *sqlx.DB
Beginx() (*sqlx.Tx, error)
Close() error
}
type ormFactory struct {
orm.L1CrossMsgOrm
orm.L2CrossMsgOrm
orm.RelayedMsgOrm
*sqlx.DB
}
// NewOrmFactory creates an ormFactory instance that implements all ORM interfaces
func NewOrmFactory(cfg *config.Config) (OrmFactory, error) {
// Initialize sql/sqlx
db, err := sqlx.Open(cfg.DB.DriverName, cfg.DB.DSN)
if err != nil {
return nil, err
}
db.SetMaxOpenConns(cfg.DB.MaxOpenNum)
db.SetMaxIdleConns(cfg.DB.MaxIdleNum)
if err = db.Ping(); err != nil {
return nil, err
}
return &ormFactory{
L1CrossMsgOrm: orm.NewL1CrossMsgOrm(db),
L2CrossMsgOrm: orm.NewL2CrossMsgOrm(db),
RelayedMsgOrm: orm.NewRelayedMsgOrm(db),
DB: db,
}, nil
}
func (o *ormFactory) GetDB() *sqlx.DB {
return o.DB
}
func (o *ormFactory) Beginx() (*sqlx.Tx, error) {
return o.DB.Beginx()
}
func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
var results []*orm.CrossMsg
rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND NOT is_deleted ORDER BY id DESC LIMIT $2 OFFSET $3;`, sender, limit, offset)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return results, nil
}
return nil, err
}
for rows.Next() {
msg := &orm.CrossMsg{}
if err = rows.StructScan(msg); err != nil {
return nil, err
}
results = append(results, msg)
}
return results, nil
}
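Since the factory embeds all three ORMs plus the raw `*sqlx.DB`, one handle serves both the per-table methods and the paginated cross-table query. A sketch of how a history endpoint might use it; `config.NewConfig` and the config path are assumptions, not defined in this diff.

```go
// Sketch only: any way of producing a *config.Config with valid DB settings would work.
package main

import (
	"log"

	"bridge-history-api/config"
	"bridge-history-api/db"
)

func main() {
	cfg, err := config.NewConfig("./config.json") // hypothetical loader
	if err != nil {
		log.Fatal(err)
	}
	factory, err := db.NewOrmFactory(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer factory.Close()

	// Second page of 10 rows, newest first: offset = page * limit = 10.
	msgs, err := factory.GetCrossMsgsByAddressWithOffset("0xSenderAddress", 10, 10)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d cross messages", len(msgs))
}
```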

View File

@@ -1,92 +1,146 @@
module scroll-tech/bridge-history-api
module bridge-history-api
go 1.20
require (
github.com/gin-contrib/cors v1.5.0
github.com/gin-gonic/gin v1.9.1
github.com/go-redis/redis/v8 v8.11.5
github.com/google/uuid v1.4.0
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
github.com/stretchr/testify v1.8.4
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.5.0
gorm.io/gorm v1.25.5
github.com/ethereum/go-ethereum v1.11.6
github.com/jmoiron/sqlx v1.3.5
github.com/kataras/iris/v12 v12.2.0
github.com/lib/pq v1.10.7
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.18
github.com/modern-go/reflect2 v1.0.2
github.com/pressly/goose/v3 v3.7.0
github.com/stretchr/testify v1.8.2
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
)
require (
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 // indirect
github.com/CloudyKit/jet/v6 v6.2.0 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
github.com/Joker/jade v1.1.3 // indirect
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.10.1 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/cockroachdb/errors v1.9.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v20.10.21+incompatible // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/flosch/pongo2/v4 v4.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.15.5 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/gobwas/httphead v0.1.0 // indirect
github.com/gobwas/pool v0.2.1 // indirect
github.com/gobwas/ws v1.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/css v1.0.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.2 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/iris-contrib/go.uuid v2.0.0+incompatible // indirect
github.com/iris-contrib/schema v0.0.6 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/kataras/blocks v0.0.7 // indirect
github.com/kataras/golog v0.1.8 // indirect
github.com/kataras/neffos v0.0.21 // indirect
github.com/kataras/pio v0.0.11 // indirect
github.com/kataras/sitemap v0.0.6 // indirect
github.com/kataras/tunnel v0.0.4 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mailgun/raymond/v2 v2.0.48 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mediocregopher/radix/v3 v3.8.1 // indirect
github.com/microcosm-cc/bluemonday v1.0.23 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/nats-io/nats.go v1.23.0 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.27.1 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.7.1 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/schollz/closestmatch v2.1.0+incompatible // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tdewolff/minify/v2 v2.12.4 // indirect
github.com/tdewolff/parse/v2 v2.6.4 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
github.com/yosssi/ace v0.0.5 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.31.0 // indirect
golang.org/x/tools v0.8.0 // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/protobuf v1.29.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -1,17 +1,43 @@
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0=
github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
github.com/CloudyKit/jet/v6 v6.2.0 h1:EpcZ6SR9n28BUGtNJSvlBqf90IpjeFr36Tizxhn/oME=
github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Joker/hpp v1.0.0 h1:65+iuJYdRXv/XyN62C1uEmmOx3432rNG/rKlX6V7Kkc=
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.1.3 h1:Qbeh12Vq6BxURXT1qZBRHsDxeURB8ztcL6f3EXSGeHk=
github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 h1:KkH3I3sJuOLP3TjA/dfr4NAY8bghDwnXiU7cTKxQqo0=
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
@@ -19,311 +45,678 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0=
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk=
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM=
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T/Lao=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum/go-ethereum v1.11.6 h1:2VF8Mf7XiSUfmoNOy3D+ocfl9Qu8baQBrCNbo2CXQ8E=
github.com/ethereum/go-ethereum v1.11.6/go.mod h1:+a8pUj1tOyJ2RinsNQD4326YS+leSoKGiG/uVVb0x6Y=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0Hw=
github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/gobwas/ws v1.1.0 h1:7RFti/xnNkMJnrK7D1yQ/iCIB5OrrY/54/H930kIbHA=
github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k=
github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs=
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
github.com/iris-contrib/go.uuid v2.0.0+incompatible h1:XZubAYg61/JwnJNbZilGjf3b3pB80+OQg2qf6c8BfWE=
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/httpexpect/v2 v2.12.1 h1:3cTZSyBBen/kfjCtgNFoUKi1u0FVXNaAjyRJOo6AVS4=
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw=
github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/kataras/blocks v0.0.7 h1:cF3RDY/vxnSRezc7vLFlQFTYXG/yAr1o7WImJuZbzC4=
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
github.com/kataras/golog v0.1.8 h1:isP8th4PJH2SrbkciKnylaND9xoTtfxv++NB+DF0l9g=
github.com/kataras/golog v0.1.8/go.mod h1:rGPAin4hYROfk1qT9wZP6VY2rsb4zzc37QpdPjdkqVw=
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
github.com/kataras/iris/v12 v12.2.0 h1:WzDY5nGuW/LgVaFS5BtTkW3crdSKJ/FEgWnxPnIVVLI=
github.com/kataras/iris/v12 v12.2.0/go.mod h1:BLzBpEunc41GbE68OUaQlqX4jzi791mx5HU04uPb90Y=
github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
github.com/kataras/neffos v0.0.21 h1:UwN/F44jlqdtgFI29y3VhA7IlJ4JbK3UjCbTDg1pYoo=
github.com/kataras/neffos v0.0.21/go.mod h1:FeGka8lu8cjD2H+0OpBvW8c6xXawy3fj5VX6xcIJ1Fg=
github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
github.com/kataras/pio v0.0.11 h1:kqreJ5KOEXGMwHAWHDwIl+mjfNCPhAwZPa8gK7MKlyw=
github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI=
github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
github.com/kataras/sitemap v0.0.6 h1:w71CRMMKYMJh6LR2wTgnk5hSgjVNB9KL60n5e2KHvLY=
github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4=
github.com/kataras/tunnel v0.0.4 h1:sCAqWuJV7nPzGrlb0os3j49lk2JhILT0rID38NHNLpA=
github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M=
github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/microcosm-cc/bluemonday v1.0.23 h1:SMZe2IGa0NuHvnVNAZ+6B38gsTbi5e4sViiWJyDDqFY=
github.com/microcosm-cc/bluemonday v1.0.23/go.mod h1:mN70sk7UkkF8TUr2IGBpNN0jAgStuPzlK76QuruE/z4=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/nats-io/jwt v0.3.0 h1:xdnzwFETV++jNc4W1mw//qFyJGb2ABOombmZJQS4+Qo=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
github.com/nats-io/nats-server/v2 v2.9.11 h1:4y5SwWvWI59V5mcqtuoqKq6L9NDUydOP3Ekwuwl8cZI=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nats.go v1.23.0 h1:lR28r7IX44WjYgdiKz9GmUeW0uh/m33uD3yEjLZ2cOE=
github.com/nats-io/nats.go v1.23.0/go.mod h1:ki/Scsa23edbh8IRZbCuNXR9TDcbvfaSijKtaqQgw+Q=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg=
github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM=
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c h1:MnAdt80steCDli4SAD0J0spBGNY+gQvbdptNjWztHcw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c/go.mod h1:4HrFcoStbViFVy/9l/rvKl1XmizVAaPdgqI8v0U8hOc=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo=
github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk=
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tdewolff/minify/v2 v2.12.4 h1:kejsHQMM17n6/gwdw53qsi6lg0TGddZADVyQOz1KMdE=
github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk=
github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZycQ=
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dzWP1Lu+A40W883dK/Mr3xyDSM/2MggS8GtHT0qgAnE=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/yosssi/ace v0.0.5 h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA=
github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
modernc.org/libc v1.32.0 h1:yXatHTrACp3WaKNRCoZwUK7qj5V8ep1XyY0ka4oYcNc=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs=

View File

@@ -1,66 +0,0 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
"scroll-tech/common/database"
)
// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct {
Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"`
StartHeight uint64 `json:"startHeight"` // Must be set to the contract deployment height; otherwise, in the current implementation, the message proof cannot be updated successfully.
BlockTime int64 `json:"blockTime"`
FetchLimit uint64 `json:"fetchLimit"`
MessengerAddr string `json:"MessengerAddr"`
ETHGatewayAddr string `json:"ETHGatewayAddr"`
StandardERC20GatewayAddr string `json:"StandardERC20GatewayAddr"`
CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
WETHGatewayAddr string `json:"WETHGatewayAddr"`
DAIGatewayAddr string `json:"DAIGatewayAddr"`
USDCGatewayAddr string `json:"USDCGatewayAddr"`
LIDOGatewayAddr string `json:"LIDOGatewayAddr"`
ERC721GatewayAddr string `json:"ERC721GatewayAddr"`
ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
ScrollChainAddr string `json:"ScrollChainAddr"`
GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"`
}
// RedisConfig is the configuration of Redis
type RedisConfig struct {
Address string `json:"address"`
Username string `json:"username"`
Password string `json:"password"`
DB int `json:"db"`
Local bool `json:"local"`
MinIdleConns int `json:"minIdleConns"`
ReadTimeoutMs int `json:"readTimeoutMs"`
}
// Config is the configuration of the bridge history backend
type Config struct {
L1 *LayerConfig `json:"L1"`
L2 *LayerConfig `json:"L2"`
DB *database.Config `json:"db"`
Redis *RedisConfig `json:"redis"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &Config{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
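
A minimal usage sketch for the config loader above. The config file path, the package import path, and the printed fields are assumptions chosen for illustration, not taken from the repository:

package main

import (
	"fmt"
	"log"

	"scroll-tech/bridge-history-api/internal/config" // assumed import path
)

func main() {
	// Hypothetical config location; adjust to the actual deployment layout.
	cfg, err := config.NewConfig("./conf/config.json")
	if err != nil {
		log.Fatalf("failed to load config: %v", err)
	}
	// Illustrative reads of a few fields defined in the struct above.
	fmt.Println("L1 endpoint:", cfg.L1.Endpoint)
	fmt.Println("L2 fetch limit:", cfg.L2.FetchLimit)
	fmt.Println("redis address:", cfg.Redis.Address)
}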

View File

@@ -1,22 +0,0 @@
package api
import (
"sync"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
)
var (
// HistoryCtrler is the controller instance
HistoryCtrler *HistoryController
initControllerOnce sync.Once
)
// InitController initializes the controller with the database and Redis client
func InitController(db *gorm.DB, redis *redis.Client) {
initControllerOnce.Do(func() {
HistoryCtrler = NewHistoryController(db, redis)
})
}

View File

@@ -1,94 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// HistoryController provides the query services for cross-chain transaction history and claimable withdrawals
type HistoryController struct {
historyLogic *logic.HistoryLogic
}
// NewHistoryController returns a new HistoryController instance
func NewHistoryController(db *gorm.DB, redis *redis.Client) *HistoryController {
return &HistoryController{
historyLogic: logic.NewHistoryLogic(db, redis),
}
}
// GetL2UnclaimedWithdrawalsByAddress handles the HTTP GET request that queries unclaimed L2 withdrawals by address
func (c *HistoryController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// GetL2WithdrawalsByAddress handles the HTTP GET request that queries L2 withdrawals by address
func (c *HistoryController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// GetTxsByAddress handles the HTTP GET request that queries cross-chain transactions by address
func (c *HistoryController) GetTxsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// PostQueryTxsByHashes handles the HTTP POST request that queries cross-chain transactions by their hashes
func (c *HistoryController) PostQueryTxsByHashes(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
return
}
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}
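
A hedged sketch of how these handlers might be wired onto a gin router. The package name, the import path of the api package, and the URL paths are assumptions for illustration; the real route registration lives elsewhere in the repository:

package route // illustrative package name

import (
	"github.com/gin-gonic/gin"
	"github.com/go-redis/redis/v8"
	"gorm.io/gorm"

	"scroll-tech/bridge-history-api/internal/controller/api" // assumed import path
)

// Register wires the HistoryController handlers onto a gin engine.
// The paths below are illustrative; the repository defines its own routes.
func Register(r *gin.Engine, db *gorm.DB, rdb *redis.Client) {
	api.InitController(db, rdb)
	r.GET("/api/txs", api.HistoryCtrler.GetTxsByAddress)
	r.GET("/api/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
	r.GET("/api/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
	r.POST("/api/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
}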

View File

@@ -1,153 +0,0 @@
package fetcher
import (
"context"
"math/big"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/utils"
)
// L1MessageFetcher fetches cross message events from L1 and saves them to the database.
type L1MessageFetcher struct {
ctx context.Context
cfg *config.LayerConfig
client *ethclient.Client
l1SyncHeight uint64
l1LastSyncBlockHash common.Hash
eventUpdateLogic *logic.EventUpdateLogic
l1FetcherLogic *logic.L1FetcherLogic
l1MessageFetcherRunningTotal prometheus.Counter
l1MessageFetcherReorgTotal prometheus.Counter
l1MessageFetcherSyncHeight prometheus.Gauge
}
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
c := &L1MessageFetcher{
ctx: ctx,
cfg: cfg,
client: client,
eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client),
}
reg := prometheus.DefaultRegisterer
c.l1MessageFetcherRunningTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "L1_message_fetcher_running_total",
Help: "Current count of running L1 message fetcher instances.",
})
c.l1MessageFetcherReorgTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "L1_message_fetcher_reorg_total",
Help: "Total count of blockchain reorgs encountered by the L1 message fetcher.",
})
c.l1MessageFetcherSyncHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "L1_message_fetcher_sync_height",
Help: "Latest blockchain height the L1 message fetcher has synced with.",
})
return c
}
// Start starts the L1 message fetching process.
func (c *L1MessageFetcher) Start() {
messageSyncedHeight, batchSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
if dbErr != nil {
log.Crit("L1MessageFetcher start failed", "err", dbErr)
}
l1SyncHeight := messageSyncedHeight
if batchSyncedHeight > l1SyncHeight {
l1SyncHeight = batchSyncedHeight
}
if c.cfg.StartHeight > l1SyncHeight {
l1SyncHeight = c.cfg.StartHeight - 1
}
// Sync from an older block to prevent reorg during restart.
if l1SyncHeight < logic.L1ReorgSafeDepth {
l1SyncHeight = 0
} else {
l1SyncHeight -= logic.L1ReorgSafeDepth
}
header, err := c.client.HeaderByNumber(c.ctx, new(big.Int).SetUint64(l1SyncHeight))
if err != nil {
log.Crit("failed to get L1 header by number", "block number", l1SyncHeight, "err", err)
return
}
c.updateL1SyncHeight(l1SyncHeight, header.Hash())
log.Info("Start L1 message fetcher", "message synced height", messageSyncedHeight, "batch synced height", batchSyncedHeight, "config start height", c.cfg.StartHeight, "sync start height", c.l1SyncHeight+1)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
for {
select {
case <-c.ctx.Done():
tick.Stop()
return
case <-tick.C:
c.fetchAndSaveEvents(c.cfg.Confirmation)
}
}
}()
}
func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
c.l1MessageFetcherRunningTotal.Inc()
startHeight := c.l1SyncHeight + 1
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
if rpcErr != nil {
log.Error("failed to get L1 block number", "confirmation", confirmation, "err", rpcErr)
return
}
log.Info("fetch and save missing L1 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
to := from + c.cfg.FetchLimit - 1
if to > endHeight {
to = endHeight
}
isReorg, resyncHeight, lastBlockHash, l1FetcherResult, fetcherErr := c.l1FetcherLogic.L1Fetcher(c.ctx, from, to, c.l1LastSyncBlockHash)
if fetcherErr != nil {
log.Error("failed to fetch L1 events", "from", from, "to", to, "err", fetcherErr)
return
}
if isReorg {
c.l1MessageFetcherReorgTotal.Inc()
log.Warn("L1 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
c.updateL1SyncHeight(resyncHeight, lastBlockHash)
return
}
if insertUpdateErr := c.eventUpdateLogic.L1InsertOrUpdate(c.ctx, l1FetcherResult); insertUpdateErr != nil {
log.Error("failed to save L1 events", "from", from, "to", to, "err", insertUpdateErr)
return
}
c.updateL1SyncHeight(to, lastBlockHash)
}
}
func (c *L1MessageFetcher) updateL1SyncHeight(height uint64, blockHash common.Hash) {
c.l1MessageFetcherSyncHeight.Set(float64(height))
c.l1LastSyncBlockHash = blockHash
c.l1SyncHeight = height
}
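
A hedged sketch of constructing and starting this fetcher. The package name, the import paths of the config and fetcher packages, and the way the *gorm.DB handle is obtained are assumptions for illustration:

package app // illustrative package name

import (
	"context"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"gorm.io/gorm"

	"scroll-tech/bridge-history-api/internal/config"             // assumed import path
	"scroll-tech/bridge-history-api/internal/controller/fetcher" // assumed import path
)

// StartL1Fetcher dials the L1 endpoint from the config and launches the fetcher loop.
// Opening the *gorm.DB handle is outside this sketch; cancel ctx to stop the loop.
func StartL1Fetcher(ctx context.Context, cfg *config.Config, db *gorm.DB) error {
	client, err := ethclient.Dial(cfg.L1.Endpoint)
	if err != nil {
		return err
	}
	fetcher.NewL1MessageFetcher(ctx, cfg.L1, db, client).Start()
	return nil
}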

View File

@@ -1,152 +0,0 @@
package fetcher
import (
"context"
"math/big"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/utils"
)
// L2MessageFetcher fetches cross message events from L2 and saves them to the database.
type L2MessageFetcher struct {
ctx context.Context
cfg *config.LayerConfig
db *gorm.DB
client *ethclient.Client
l2SyncHeight uint64
l2LastSyncBlockHash common.Hash
eventUpdateLogic *logic.EventUpdateLogic
l2FetcherLogic *logic.L2FetcherLogic
l2MessageFetcherRunningTotal prometheus.Counter
l2MessageFetcherReorgTotal prometheus.Counter
l2MessageFetcherSyncHeight prometheus.Gauge
}
// NewL2MessageFetcher creates a new L2MessageFetcher instance.
func NewL2MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
c := &L2MessageFetcher{
ctx: ctx,
cfg: cfg,
db: db,
client: client,
eventUpdateLogic: logic.NewEventUpdateLogic(db, false),
l2FetcherLogic: logic.NewL2FetcherLogic(cfg, db, client),
}
reg := prometheus.DefaultRegisterer
c.l2MessageFetcherRunningTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "L2_message_fetcher_running_total",
Help: "Current count of running L2 message fetcher instances.",
})
c.l2MessageFetcherReorgTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "L2_message_fetcher_reorg_total",
Help: "Total count of blockchain reorgs encountered by the L2 message fetcher.",
})
c.l2MessageFetcherSyncHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "L2_message_fetcher_sync_height",
Help: "Latest blockchain height the L2 message fetcher has synced with.",
})
return c
}
// Start starts the L2 message fetching process.
func (c *L2MessageFetcher) Start() {
l2SentMessageSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
if dbErr != nil {
log.Crit("failed to get L2 cross message processed height", "err", dbErr)
return
}
l2SyncHeight := l2SentMessageSyncedHeight
// Sync from an older block to prevent reorg during restart.
if l2SyncHeight < logic.L2ReorgSafeDepth {
l2SyncHeight = 0
} else {
l2SyncHeight -= logic.L2ReorgSafeDepth
}
header, err := c.client.HeaderByNumber(c.ctx, new(big.Int).SetUint64(l2SyncHeight))
if err != nil {
log.Crit("failed to get L2 header by number", "block number", l2SyncHeight, "err", err)
return
}
c.updateL2SyncHeight(l2SyncHeight, header.Hash())
log.Info("Start L2 message fetcher", "message synced height", l2SentMessageSyncedHeight, "sync start height", l2SyncHeight+1)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
for {
select {
case <-c.ctx.Done():
tick.Stop()
return
case <-tick.C:
c.fetchAndSaveEvents(c.cfg.Confirmation)
}
}
}()
}
func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
startHeight := c.l2SyncHeight + 1
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
if rpcErr != nil {
log.Error("failed to get L2 block number", "confirmation", confirmation, "err", rpcErr)
return
}
log.Info("fetch and save missing L2 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
c.l2MessageFetcherRunningTotal.Inc()
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
to := from + c.cfg.FetchLimit - 1
if to > endHeight {
to = endHeight
}
isReorg, resyncHeight, lastBlockHash, l2FetcherResult, fetcherErr := c.l2FetcherLogic.L2Fetcher(c.ctx, from, to, c.l2LastSyncBlockHash)
if fetcherErr != nil {
log.Error("failed to fetch L2 events", "from", from, "to", to, "err", fetcherErr)
return
}
if isReorg {
c.l2MessageFetcherReorgTotal.Inc()
log.Warn("L2 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
c.updateL2SyncHeight(resyncHeight, lastBlockHash)
return
}
if insertUpdateErr := c.eventUpdateLogic.L2InsertOrUpdate(c.ctx, l2FetcherResult); insertUpdateErr != nil {
log.Error("failed to save L2 events", "from", from, "to", to, "err", insertUpdateErr)
return
}
if updateErr := c.eventUpdateLogic.UpdateL1BatchIndexAndStatus(c.ctx, c.l2SyncHeight); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}
c.updateL2SyncHeight(to, lastBlockHash)
}
}
func (c *L2MessageFetcher) updateL2SyncHeight(height uint64, blockHash common.Hash) {
c.l2MessageFetcherSyncHeight.Set(float64(height))
c.l2LastSyncBlockHash = blockHash
c.l2SyncHeight = height
}

View File

@@ -1,210 +0,0 @@
package logic
import (
"context"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/utils"
)
// EventUpdateLogic the logic of insert/update the database
type EventUpdateLogic struct {
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight prometheus.Gauge
eventUpdateLogicL2MessageNonceUpdateHeight prometheus.Gauge
}
// NewEventUpdateLogic creates a EventUpdateLogic instance
func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
b := &EventUpdateLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
}
if !isL1 {
reg := prometheus.DefaultRegisterer
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "event_update_logic_L1_finalize_batch_event_L2_block_update_height",
Help: "L2 block height of the latest L1 batch event that has been finalized and updated in the message_table.",
})
b.eventUpdateLogicL2MessageNonceUpdateHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "event_update_logic_L2_message_nonce_update_height",
Help: "L2 message nonce height in the latest L1 batch event that has been finalized and updated in the message_table.",
})
}
return b
}
// GetL1SyncHeight gets the l1 sync height from db
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, error) {
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL1SentMessage)
if err != nil {
log.Error("failed to get L1 cross message synced height", "error", err)
return 0, 0, err
}
batchSyncedHeight, err := b.batchEventOrm.GetBatchEventSyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get L1 batch event synced height", "error", err)
return 0, 0, err
}
return messageSyncedHeight, batchSyncedHeight, nil
}
// GetL2MessageSyncedHeightInDB gets L2 messages synced height
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, error) {
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL2SentMessage)
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, err
}
return l2SentMessageSyncedHeight, nil
}
// L1InsertOrUpdate inserts or updates l1 messages
func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult *L1FilterResult) error {
err := b.db.Transaction(func(tx *gorm.DB) error {
if txErr := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages, tx); txErr != nil {
log.Error("failed to insert L1 deposit messages", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages, tx); txErr != nil {
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", txErr)
return txErr
}
if txErr := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents, tx); txErr != nil {
log.Error("failed to insert or update batch events", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents, tx); txErr != nil {
log.Error("failed to insert L1 message queue events", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l1FetcherResult.RevertedTxs, tx); txErr != nil {
log.Error("failed to insert L1 failed gateway router transactions", "err", txErr)
return txErr
}
return nil
})
if err != nil {
log.Error("failed to update db of L1 events", "err", err)
return err
}
return nil
}
func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, batchIndex, startBlock, endBlock uint64) error {
l2WithdrawMessages, err := b.crossMessageOrm.GetL2WithdrawalsByBlockRange(ctx, startBlock, endBlock)
if err != nil {
log.Error("failed to get L2 withdrawals by batch index", "batch index", batchIndex, "err", err)
return err
}
if len(l2WithdrawMessages) == 0 {
return nil
}
withdrawTrie := utils.NewWithdrawTrie()
lastMessage, err := b.crossMessageOrm.GetL2LatestFinalizedWithdrawal(ctx)
if err != nil {
log.Error("failed to get latest L2 finalized sent message event", "err", err)
return err
}
if lastMessage != nil {
withdrawTrie.Initialize(lastMessage.MessageNonce, common.HexToHash(lastMessage.MessageHash), lastMessage.MerkleProof)
}
if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce {
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actuall next message nonce", l2WithdrawMessages[0].MessageNonce)
return fmt.Errorf("nonce mismatch")
}
messageHashes := make([]common.Hash, len(l2WithdrawMessages))
for i, message := range l2WithdrawMessages {
messageHashes[i] = common.HexToHash(message.MessageHash)
}
proofs := withdrawTrie.AppendMessages(messageHashes)
for i, message := range l2WithdrawMessages {
message.MerkleProof = proofs[i]
message.RollupStatus = int(orm.RollupStatusTypeFinalized)
message.BatchIndex = batchIndex
}
if dbErr := b.crossMessageOrm.UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx, l2WithdrawMessages); dbErr != nil {
log.Error("failed to update batch index and rollup status and merkle proof of L2 messages", "err", dbErr)
return dbErr
}
b.eventUpdateLogicL2MessageNonceUpdateHeight.Set(float64(withdrawTrie.NextMessageNonce - 1))
return nil
}
// UpdateL1BatchIndexAndStatus updates L1 finalized batch index and status
func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, height uint64) error {
finalizedBatches, err := b.batchEventOrm.GetFinalizedBatchesLEBlockHeight(ctx, height)
if err != nil {
log.Error("failed to get batches >= block height", "error", err)
return err
}
for _, finalizedBatch := range finalizedBatches {
log.Info("update finalized batch info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, finalizedBatch.StartBlockNumber, finalizedBatch.EndBlockNumber); updateErr != nil {
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
return updateErr
}
if dbErr := b.batchEventOrm.UpdateBatchEventStatus(ctx, finalizedBatch.BatchIndex); dbErr != nil {
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
return dbErr
}
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight.Set(float64(finalizedBatch.EndBlockNumber))
}
return nil
}
// L2InsertOrUpdate inserts or updates L2 messages
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
err := b.db.Transaction(func(tx *gorm.DB) error {
if txErr := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages, tx); txErr != nil {
log.Error("failed to insert L2 withdrawal messages", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages, tx); txErr != nil {
log.Error("failed to update L2 relayed messages of L1 deposits", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l2FetcherResult.OtherRevertedTxs, tx); txErr != nil {
log.Error("failed to insert L2 failed gateway router transactions", "err", txErr)
return txErr
}
return nil
})
if err != nil {
log.Error("failed to update db of L2 events", "err", err)
return err
}
return nil
}

View File

@@ -1,407 +0,0 @@
package logic
import (
"context"
"encoding/json"
"errors"
"reflect"
"strconv"
"time"
"github.com/go-redis/redis/v8"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/singleflight"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
const (
// cacheKeyPrefixBridgeHistory serves as a specific namespace for all Redis cache keys
// associated with the 'bridge-history' user. This prefix is used to enforce access controls
// in Redis, allowing permissions to be set such that only users with the appropriate
// access rights can read or write to keys starting with "bridge-history".
cacheKeyPrefixBridgeHistory = "bridge-history-"
cacheKeyPrefixL2ClaimableWithdrawalsByAddr = cacheKeyPrefixBridgeHistory + "l2ClaimableWithdrawalsByAddr:"
cacheKeyPrefixL2WithdrawalsByAddr = cacheKeyPrefixBridgeHistory + "l2WithdrawalsByAddr:"
cacheKeyPrefixTxsByAddr = cacheKeyPrefixBridgeHistory + "txsByAddr:"
cacheKeyPrefixQueryTxsByHashes = cacheKeyPrefixBridgeHistory + "queryTxsByHashes:"
cacheKeyExpiredTime = 1 * time.Minute
)
// HistoryLogic services.
type HistoryLogic struct {
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
redis *redis.Client
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
}
// NewHistoryLogic returns bridge history services.
func NewHistoryLogic(db *gorm.DB, redis *redis.Client) *HistoryLogic {
logic := &HistoryLogic{
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
redis: redis,
cacheMetrics: initCacheMetrics(),
}
return logic
}
// GetL2UnclaimedWithdrawalsByAddress gets all unclaimed withdrawal txs under given address.
func (h *HistoryLogic) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
cacheKey := cacheKeyPrefixL2ClaimableWithdrawalsByAddr + address
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err != nil {
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
if isHit {
h.cacheMetrics.cacheHits.WithLabelValues("GetL2UnclaimedWithdrawalsByAddress").Inc()
log.Info("cache hit", "cache key", cacheKey)
return pagedTxs, total, nil
}
h.cacheMetrics.cacheMisses.WithLabelValues("GetL2UnclaimedWithdrawalsByAddress").Inc()
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
if err != nil {
return nil, err
}
return messages, nil
})
if err != nil {
log.Error("failed to get L2 claimable withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetL2WithdrawalsByAddress gets all withdrawal txs under given address.
func (h *HistoryLogic) GetL2WithdrawalsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
cacheKey := cacheKeyPrefixL2WithdrawalsByAddr + address
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err != nil {
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
if isHit {
h.cacheMetrics.cacheHits.WithLabelValues("GetL2WithdrawalsByAddress").Inc()
log.Info("cache hit", "cache key", cacheKey)
return pagedTxs, total, nil
}
h.cacheMetrics.cacheMisses.WithLabelValues("GetL2WithdrawalsByAddress").Inc()
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
if err != nil {
return nil, err
}
return messages, nil
})
if err != nil {
log.Error("failed to get L2 withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetTxsByAddress gets tx infos under given address.
func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
cacheKey := cacheKeyPrefixTxsByAddr + address
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err != nil {
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
if isHit {
h.cacheMetrics.cacheHits.WithLabelValues("GetTxsByAddress").Inc()
log.Info("cache hit", "cache key", cacheKey)
return pagedTxs, total, nil
}
h.cacheMetrics.cacheMisses.WithLabelValues("GetTxsByAddress").Inc()
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetTxsByAddress(ctx, address)
if err != nil {
return nil, err
}
return messages, nil
})
if err != nil {
log.Error("failed to get txs by address", "address", address, "error", err)
return nil, 0, err
}
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetTxsByHashes gets tx infos under given tx hashes.
func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([]*types.TxHistoryInfo, error) {
hashesMap := make(map[string]struct{}, len(txHashes))
results := make([]*types.TxHistoryInfo, 0, len(txHashes))
uncachedHashes := make([]string, 0, len(txHashes))
for _, hash := range txHashes {
if _, exists := hashesMap[hash]; exists {
// Skip duplicate tx hash values.
continue
}
hashesMap[hash] = struct{}{}
cacheKey := cacheKeyPrefixQueryTxsByHashes + hash
cachedData, err := h.redis.Get(ctx, cacheKey).Bytes()
if err != nil && errors.Is(err, redis.Nil) {
h.cacheMetrics.cacheMisses.WithLabelValues("PostQueryTxsByHashes").Inc()
log.Info("cache miss", "cache key", cacheKey)
uncachedHashes = append(uncachedHashes, hash)
continue
}
if err != nil {
log.Error("failed to get data from Redis", "error", err)
uncachedHashes = append(uncachedHashes, hash)
continue
}
h.cacheMetrics.cacheHits.WithLabelValues("PostQueryTxsByHashes").Inc()
log.Info("cache hit", "cache key", cacheKey)
if len(cachedData) == 0 {
continue
}
var txInfo types.TxHistoryInfo
if unmarshalErr := json.Unmarshal(cachedData, &txInfo); unmarshalErr != nil {
log.Error("failed to unmarshal cached data", "error", unmarshalErr)
uncachedHashes = append(uncachedHashes, hash)
continue
}
results = append(results, &txInfo)
}
if len(uncachedHashes) > 0 {
messages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
var txHistories []*types.TxHistoryInfo
for _, message := range messages {
txHistories = append(txHistories, getTxHistoryInfo(message))
}
resultMap := make(map[string]*types.TxHistoryInfo)
for _, result := range txHistories {
results = append(results, result)
resultMap[result.Hash] = result
}
for _, hash := range uncachedHashes {
cacheKey := cacheKeyPrefixQueryTxsByHashes + hash
result, found := resultMap[hash]
if !found {
// tx hash not found, which is also a valid result, cache empty string.
if cacheErr := h.redis.Set(ctx, cacheKey, "", cacheKeyExpiredTime).Err(); cacheErr != nil {
log.Error("failed to set data to Redis", "error", cacheErr)
}
continue
}
jsonData, marshalErr := json.Marshal(result)
if marshalErr != nil {
log.Error("failed to marshal data", "error", marshalErr)
continue
}
if cacheErr := h.redis.Set(ctx, cacheKey, jsonData, cacheKeyExpiredTime).Err(); cacheErr != nil {
log.Error("failed to set data to Redis", "error", cacheErr)
}
}
}
return results, nil
}
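// lookupTxCache is an illustrative helper sketch (not part of the original file) of the
// negative-caching pattern GetTxsByHashes relies on: unknown hashes are cached as an empty value,
// so repeated queries for nonexistent transactions skip the database until the key expires.
func (h *HistoryLogic) lookupTxCache(ctx context.Context, cacheKey string) ([]byte, bool, error) {
data, err := h.redis.Get(ctx, cacheKey).Bytes()
if err != nil {
if errors.Is(err, redis.Nil) {
// Cache miss: the caller should query the database and then cache the (possibly empty) result.
return nil, false, nil
}
return nil, false, err
}
// Cache hit: an empty slice means the hash is known not to exist.
return data, true, nil
}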
func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
MessageHash: message.MessageHash,
TokenType: orm.TokenType(message.TokenType),
TokenIDs: utils.ConvertStringToStringArray(message.TokenIDs),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmounts),
L1TokenAddress: message.L1TokenAddress,
L2TokenAddress: message.L2TokenAddress,
MessageType: orm.MessageType(message.MessageType),
TxStatus: orm.TxStatusType(message.TxStatus),
BlockTimestamp: message.BlockTimestamp,
}
if txHistory.MessageType == orm.MessageTypeL1SentMessage {
txHistory.Hash = message.L1TxHash
txHistory.ReplayTxHash = message.L1ReplayTxHash
txHistory.RefundTxHash = message.L1RefundTxHash
txHistory.BlockNumber = message.L1BlockNumber
txHistory.CounterpartChainTx = &types.CounterpartChainTx{
Hash: message.L2TxHash,
BlockNumber: message.L2BlockNumber,
}
} else {
txHistory.Hash = message.L2TxHash
txHistory.BlockNumber = message.L2BlockNumber
txHistory.CounterpartChainTx = &types.CounterpartChainTx{
Hash: message.L1TxHash,
BlockNumber: message.L1BlockNumber,
}
if orm.RollupStatusType(message.RollupStatus) == orm.RollupStatusTypeFinalized {
txHistory.ClaimInfo = &types.ClaimInfo{
From: message.MessageFrom,
To: message.MessageTo,
Value: message.MessageValue,
Nonce: strconv.FormatUint(message.MessageNonce, 10),
Message: message.MessageData,
Proof: types.L2MessageProof{
BatchIndex: strconv.FormatUint(message.BatchIndex, 10),
MerkleProof: "0x" + common.Bytes2Hex(message.MerkleProof),
},
Claimable: true,
}
}
}
return txHistory
}
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
start := int64((pageNum - 1) * pageSize)
end := start + int64(pageSize) - 1
total, err := h.redis.ZCard(ctx, cacheKey).Result()
if err != nil {
log.Error("failed to get zcard result", "error", err)
return nil, 0, false, err
}
if total == 0 {
return nil, 0, false, nil
}
values, err := h.redis.ZRange(ctx, cacheKey, start, end).Result()
if err != nil {
log.Error("failed to get zrange result", "error", err)
return nil, 0, false, err
}
if len(values) == 0 {
return nil, 0, false, nil
}
// check if it's empty placeholder.
if len(values) == 1 && values[0] == "empty_page" {
return nil, 0, true, nil
}
var pagedTxs []*types.TxHistoryInfo
for _, v := range values {
var tx types.TxHistoryInfo
if unmarshalErr := json.Unmarshal([]byte(v), &tx); unmarshalErr != nil {
log.Error("failed to unmarshal transaction data", "error", unmarshalErr)
return nil, 0, false, unmarshalErr
}
pagedTxs = append(pagedTxs, &tx)
}
return pagedTxs, uint64(total), true, nil
}
func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []*types.TxHistoryInfo) error {
_, err := h.redis.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
if len(txs) == 0 {
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: 0, Member: "empty_page"}).Err(); err != nil {
log.Error("failed to add empty page indicator to sorted set", "error", err)
return err
}
} else {
// The transactions are sorted, thus we set the score as their indices.
for i, tx := range txs {
txBytes, err := json.Marshal(tx)
if err != nil {
log.Error("failed to marshal transaction to json", "error", err)
return err
}
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(i), Member: txBytes}).Err(); err != nil {
log.Error("failed to add transaction to sorted set", "error", err)
return err
}
}
}
if err := pipe.Expire(ctx, cacheKey, cacheKeyExpiredTime).Err(); err != nil {
log.Error("failed to set expiry time", "error", err)
return err
}
return nil
})
if err != nil {
log.Error("failed to execute transaction", "error", err)
return err
}
return nil
}
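// zRangeBounds is an illustrative sketch (not called above) of the index arithmetic shared by
// getCachedTxsInfo and cacheTxsInfo: because each transaction is stored with its slice index as
// the ZADD score, a 1-based page maps onto an inclusive ZRANGE window. For example, page=2 with
// pageSize=10 reads ZRANGE indices 10 through 19.
func zRangeBounds(page, pageSize uint64) (start, end int64) {
start = int64((page - 1) * pageSize)
end = start + int64(pageSize) - 1
return start, end
}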
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, messages []*orm.CrossMessage, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
for _, message := range messages {
txHistories = append(txHistories, getTxHistoryInfo(message))
}
err := h.cacheTxsInfo(ctx, cacheKey, txHistories)
if err != nil {
log.Error("failed to cache txs info", "key", cacheKey, "err", err)
return nil, 0, err
}
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err != nil {
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
if !isHit {
log.Error("cache miss right after write, expected a hit", "cached key", cacheKey, "page", page, "page size", pageSize)
return nil, 0, errors.New("cache miss after cache write")
}
return pagedTxs, total, nil
}

View File

@@ -1,40 +0,0 @@
package logic
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type cacheMetrics struct {
cacheHits *prometheus.CounterVec
cacheMisses *prometheus.CounterVec
}
var (
initMetricsOnce sync.Once
cm *cacheMetrics
)
func initCacheMetrics() *cacheMetrics {
initMetricsOnce.Do(func() {
cm = &cacheMetrics{
cacheHits: promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "bridge_history_api_cache_hits_total",
Help: "The total number of cache hits",
},
[]string{"api"},
),
cacheMisses: promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "bridge_history_api_cache_misses_total",
Help: "The total number of cache misses",
},
[]string{"api"},
),
}
})
return cm
}

View File

@@ -1,272 +0,0 @@
package logic
import (
"context"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/utils"
)
// L1EventParser the l1 event parser
type L1EventParser struct {
}
// NewL1EventParser creates l1 event parser
func NewL1EventParser() *L1EventParser {
return &L1EventParser{}
}
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l1DepositMessages []*orm.CrossMessage
var l1RelayedMessages []*orm.CrossMessage
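// Note: the Deposit* cases below mutate the most recently appended deposit message
// ("lastMessage"). This relies on the messenger's SentMessage event being emitted before the
// gateway's Deposit* event within the same transaction; otherwise the index would be out of range.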
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
event := backendabi.ETHMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ETHGatewayABI, &event, "DepositETH", vlog); err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.IL1ERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "DepositERC721", vlog); err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
case backendabi.L1BatchDepositERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "BatchDepositERC721", vlog); err != nil {
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "DepositERC1155", vlog); err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog); err != nil {
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
case backendabi.L1SentMessageEventSig:
event := backendabi.L1SentMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
l1DepositMessages = append(l1DepositMessages, &orm.CrossMessage{
L1BlockNumber: vlog.BlockNumber,
Sender: event.Sender.String(),
Receiver: event.Target.String(),
TokenType: int(orm.TokenTypeETH),
L1TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageType: int(orm.MessageTypeL1SentMessage),
TxStatus: int(orm.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
})
case backendabi.L1RelayedMessageEventSig:
event := backendabi.L1RelayedMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "RelayedMessage", vlog); err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return nil, nil, err
}
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(orm.TxStatusTypeRelayed),
MessageType: int(orm.MessageTypeL2SentMessage),
})
case backendabi.L1FailedRelayedMessageEventSig:
event := backendabi.L1FailedRelayedMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "FailedRelayedMessage", vlog); err != nil {
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
return nil, nil, err
}
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(orm.TxStatusTypeFailedRelayed),
MessageType: int(orm.MessageTypeL2SentMessage),
})
}
}
return l1DepositMessages, l1RelayedMessages, nil
}
// ParseL1BatchEventLogs parses L1 watched batch events.
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1CommitBatchEventSig:
event := backendabi.L1CommitBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "CommitBatch", vlog); err != nil {
log.Warn("Failed to unpack CommitBatch event", "err", err)
return nil, err
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
return nil, err
}
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
if err != nil {
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(orm.BatchStatusTypeCommitted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
StartBlockNumber: startBlock,
EndBlockNumber: endBlock,
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1RevertBatchEventSig:
event := backendabi.L1RevertBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
log.Warn("Failed to unpack RevertBatch event", "err", err)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(orm.BatchStatusTypeReverted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1FinalizeBatchEventSig:
event := backendabi.L1FinalizeBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
log.Warn("Failed to unpack FinalizeBatch event", "err", err)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(orm.BatchStatusTypeFinalized),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
}
}
return l1BatchEvents, nil
}
// ParseL1MessageQueueEventLogs parses L1 watched message queue events.
func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1DepositMessages []*orm.CrossMessage) ([]*orm.MessageQueueEvent, error) {
messageHashes := make(map[common.Hash]struct{})
for _, msg := range l1DepositMessages {
messageHashes[common.HexToHash(msg.MessageHash)] = struct{}{}
}
var l1MessageQueueEvents []*orm.MessageQueueEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1QueueTransactionEventSig:
event := backendabi.L1QueueTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "QueueTransaction", vlog); err != nil {
log.Warn("Failed to unpack QueueTransaction event", "err", err)
return nil, err
}
messageHash := common.BytesToHash(crypto.Keccak256(event.Data))
// If the message hash is not found in the map, the QueueTransaction did not come from a regular sendMessage deposit, so it is a replayMessage or an enforced tx; add it to the events.
if _, exists := messageHashes[messageHash]; !exists {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: orm.MessageQueueEventTypeQueueTransaction,
QueueIndex: event.QueueIndex,
MessageHash: messageHash,
TxHash: vlog.TxHash,
})
}
case backendabi.L1DequeueTransactionEventSig:
event := backendabi.L1DequeueTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DequeueTransaction", vlog); err != nil {
log.Warn("Failed to unpack DequeueTransaction event", "err", err)
return nil, err
}
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
for _, index := range skippedIndices {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: orm.MessageQueueEventTypeDequeueTransaction,
QueueIndex: index,
})
}
case backendabi.L1DropTransactionEventSig:
event := backendabi.L1DropTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
log.Warn("Failed to unpack DropTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: orm.MessageQueueEventTypeDropTransaction,
QueueIndex: event.Index.Uint64(),
TxHash: vlog.TxHash,
})
}
}
return l1MessageQueueEvents, nil
}
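// The DequeueTransaction case above expands a skipped bitmap into individual queue indices via
// utils.GetSkippedQueueIndices. A minimal sketch of that expansion, assuming bit i of the bitmap
// marks queue index startIndex+i as skipped (the actual helper may bound or encode this
// differently), and requiring "math/big":
//
//	func skippedQueueIndices(startIndex uint64, bitmap *big.Int) []uint64 {
//		var indices []uint64
//		for i := 0; i < bitmap.BitLen(); i++ {
//			if bitmap.Bit(i) == 1 {
//				indices = append(indices, startIndex+uint64(i))
//			}
//		}
//		return indices
//	}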

View File

@@ -1,316 +0,0 @@
package logic
import (
"context"
"math/big"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/utils"
)
// L1ReorgSafeDepth represents the number of block confirmations considered safe against L1 chain reorganizations.
// Reorganizations at this depth under normal cases are extremely unlikely.
const L1ReorgSafeDepth = 64
// L1FilterResult L1 fetcher result
type L1FilterResult struct {
DepositMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage
BatchEvents []*orm.BatchEvent
MessageQueueEvents []*orm.MessageQueueEvent
RevertedTxs []*orm.CrossMessage
}
// L1FetcherLogic the L1 fetcher logic
type L1FetcherLogic struct {
cfg *config.LayerConfig
client *ethclient.Client
addressList []common.Address
parser *L1EventParser
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
l1FetcherLogicFetchedTotal *prometheus.CounterVec
}
// NewL1FetcherLogic creates L1 fetcher logic
func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
common.HexToAddress(cfg.ScrollChainAddr),
common.HexToAddress(cfg.MessageQueueAddr),
}
// Optional erc20 gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
}
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList)
f := &L1FetcherLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
cfg: cfg,
client: client,
addressList: addressList,
parser: NewL1EventParser(),
}
reg := prometheus.DefaultRegisterer
f.l1FetcherLogicFetchedTotal = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "L1_fetcher_logic_fetched_total",
Help: "The total number of events or failed txs fetched in L1 fetcher logic.",
}, []string{"type"})
return f
}
func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
blocks, err := utils.GetBlocksInRange(ctx, f.client, from, to)
if err != nil {
log.Error("failed to get L1 blocks in range", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
for _, block := range blocks {
if block.ParentHash() != lastBlockHash {
log.Warn("L1 reorg detected", "reorg height", block.NumberU64()-1, "expected hash", block.ParentHash().String(), "local hash", lastBlockHash.String())
var resyncHeight uint64
if block.NumberU64() > L1ReorgSafeDepth+1 {
resyncHeight = block.NumberU64() - L1ReorgSafeDepth - 1
}
header, err := f.client.HeaderByNumber(ctx, new(big.Int).SetUint64(resyncHeight))
if err != nil {
log.Error("failed to get L1 header by number", "block number", resyncHeight, "err", err)
return false, 0, common.Hash{}, nil, err
}
return true, resyncHeight, header.Hash(), nil, nil
}
lastBlockHash = block.Hash()
}
return false, 0, lastBlockHash, blocks, nil
}
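// resyncHeightAfterReorg is an illustrative sketch (not called above) of the clamping rule used
// when a reorg is detected: syncing restarts L1ReorgSafeDepth+1 blocks before the reorged block,
// but never below genesis. For example, a reorg at block 100 with a safe depth of 64 restarts
// syncing from block 35.
func resyncHeightAfterReorg(reorgedBlockNumber uint64) uint64 {
if reorgedBlockNumber > L1ReorgSafeDepth+1 {
return reorgedBlockNumber - L1ReorgSafeDepth - 1
}
return 0
}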
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
var l1RevertedTxs []*orm.CrossMessage
blockTimestampsMap := make(map[uint64]uint64)
for i := from; i <= to; i++ {
block := blocks[i-from]
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
txTo := tx.To()
if txTo == nil {
continue
}
toAddress := txTo.String()
// GatewayRouter: L1 deposit.
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
if toAddress != f.cfg.GatewayRouterAddr && toAddress != f.cfg.MessengerAddr {
continue
}
var receipt *types.Receipt
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, receiptErr
}
// Check if the transaction is failed
if receipt.Status != types.ReceiptStatusFailed {
continue
}
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, senderErr := signer.Sender(tx)
if senderErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
return nil, nil, senderErr
}
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
L1TxHash: tx.Hash().String(),
MessageType: int(orm.MessageTypeL1SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L1BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
})
}
}
return blockTimestampsMap, l1RevertedTxs, nil
}
func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
query := ethereum.FilterQuery{
FromBlock: new(big.Int).SetUint64(from), // inclusive
ToBlock: new(big.Int).SetUint64(to), // inclusive
Addresses: f.addressList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 13)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1DepositERC721Sig
query.Topics[0][3] = backendabi.L1DepositERC1155Sig
query.Topics[0][4] = backendabi.L1SentMessageEventSig
query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L1CommitBatchEventSig
query.Topics[0][8] = backendabi.L1RevertBatchEventSig
query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
log.Error("failed to filter L1 event logs", "from", from, "to", to, "err", err)
return nil, err
}
return eventLogs, nil
}
// L1Fetcher L1 fetcher
func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, *L1FilterResult, error) {
log.Info("fetch and save L1 events", "from", from, "to", to)
isReorg, reorgHeight, blockHash, blocks, getErr := f.getBlocksAndDetectReorg(ctx, from, to, lastBlockHash)
if getErr != nil {
log.Error("L1Fetcher getBlocksAndDetectReorg failed", "from", from, "to", to, "error", getErr)
return false, 0, common.Hash{}, nil, getErr
}
if isReorg {
return isReorg, reorgHeight, blockHash, nil, nil
}
blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
if err != nil {
log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
eventLogs, err := f.l1FetcherLogs(ctx, from, to)
if err != nil {
log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
if err != nil {
log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
l1MessageQueueEvents, err := f.parser.ParseL1MessageQueueEventLogs(eventLogs, l1DepositMessages)
if err != nil {
log.Error("failed to parse L1 message queue event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
res := L1FilterResult{
DepositMessages: l1DepositMessages,
RelayedMessages: l1RelayedMessages,
BatchEvents: l1BatchEvents,
MessageQueueEvents: l1MessageQueueEvents,
RevertedTxs: l1RevertedTxs,
}
f.updateMetrics(res)
return false, 0, blockHash, &res, nil
}
func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_gateway_router_transaction").Add(float64(len(res.RevertedTxs)))
for _, depositMessage := range res.DepositMessages {
switch orm.TokenType(depositMessage.TokenType) {
case orm.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_eth").Add(1)
case orm.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc20").Add(1)
case orm.TokenTypeERC721:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc721").Add(1)
case orm.TokenTypeERC1155:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch orm.TxStatusType(relayedMessage.TxStatus) {
case orm.TxStatusTypeRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_relayed_message").Add(1)
case orm.TxStatusTypeFailedRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_relayed_message").Add(1)
}
// Reverted L1 relayed-message transactions are not tracked yet:
// 1. it would require parsing the tx calldata;
// 2. internal txs are hard to track.
}
for _, batchEvent := range res.BatchEvents {
switch orm.BatchStatusType(batchEvent.BatchStatus) {
case orm.BatchStatusTypeCommitted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_commit_batch_event").Add(1)
case orm.BatchStatusTypeReverted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_revert_batch_event").Add(1)
case orm.BatchStatusTypeFinalized:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_finalize_batch_event").Add(1)
}
}
for _, messageQueueEvent := range res.MessageQueueEvents {
switch messageQueueEvent.EventType {
case orm.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_replay_message_or_enforced_transaction").Add(1)
case orm.MessageQueueEventTypeDequeueTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
case orm.MessageQueueEventTypeDropTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
}
}
}

View File

@@ -1,167 +0,0 @@
package logic
import (
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/utils"
)
// L2EventParser the L2 event parser
type L2EventParser struct {
}
// NewL2EventParser creates the L2 event parser
func NewL2EventParser() *L2EventParser {
return &L2EventParser{}
}
// ParseL2EventLogs parses L2 watched events
func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l2WithdrawMessages []*orm.CrossMessage
var l2RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
event := backendabi.ETHMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
case backendabi.L2BatchWithdrawERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2BatchWithdrawERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
case backendabi.L2SentMessageEventSig:
event := backendabi.L2SentMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
l2WithdrawMessages = append(l2WithdrawMessages, &orm.CrossMessage{
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
Sender: event.Sender.String(),
Receiver: event.Target.String(),
TokenType: int(orm.TokenTypeETH),
L2TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageFrom: event.Sender.String(),
MessageTo: event.Target.String(),
MessageValue: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageData: hexutil.Encode(event.Message),
MessageType: int(orm.MessageTypeL2SentMessage),
TxStatus: int(orm.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L2BlockNumber: vlog.BlockNumber,
})
case backendabi.L2RelayedMessageEventSig:
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return nil, nil, err
}
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(orm.TxStatusTypeRelayed),
MessageType: int(orm.MessageTypeL1SentMessage),
})
case backendabi.L2FailedRelayedMessageEventSig:
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "FailedRelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
return nil, nil, err
}
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(orm.TxStatusTypeFailedRelayed),
MessageType: int(orm.MessageTypeL1SentMessage),
})
}
}
return l2WithdrawMessages, l2RelayedMessages, nil
}

View File

@@ -1,281 +0,0 @@
package logic
import (
"context"
"math/big"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/utils"
)
// L2ReorgSafeDepth represents the number of block confirmations considered safe against L2 chain reorganizations.
// Reorganizations at this depth under normal cases are extremely unlikely.
const L2ReorgSafeDepth = 256
// L2FilterResult the L2 filter result
type L2FilterResult struct {
WithdrawMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
}
// L2FetcherLogic the L2 fetcher logic
type L2FetcherLogic struct {
cfg *config.LayerConfig
client *ethclient.Client
addressList []common.Address
parser *L2EventParser
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
l2FetcherLogicFetchedTotal *prometheus.CounterVec
}
// NewL2FetcherLogic create L2 fetcher logic
func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
}
// Optional erc20 gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
}
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList)
f := &L2FetcherLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
cfg: cfg,
client: client,
addressList: addressList,
parser: NewL2EventParser(),
}
reg := prometheus.DefaultRegisterer
f.l2FetcherLogicFetchedTotal = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "L2_fetcher_logic_fetched_total",
Help: "The total number of events or failed txs fetched in L2 fetcher logic.",
}, []string{"type"})
return f
}
func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
blocks, err := utils.GetBlocksInRange(ctx, f.client, from, to)
if err != nil {
log.Error("failed to get L2 blocks in range", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
for _, block := range blocks {
if block.ParentHash() != lastBlockHash {
log.Warn("L2 reorg detected", "reorg height", block.NumberU64()-1, "expected hash", block.ParentHash().String(), "local hash", lastBlockHash.String())
var resyncHeight uint64
if block.NumberU64() > L2ReorgSafeDepth+1 {
resyncHeight = block.NumberU64() - L2ReorgSafeDepth - 1
}
header, err := f.client.HeaderByNumber(ctx, new(big.Int).SetUint64(resyncHeight))
if err != nil {
log.Error("failed to get L2 header by number", "block number", resyncHeight, "err", err)
return false, 0, common.Hash{}, nil, err
}
return true, resyncHeight, header.Hash(), nil, nil
}
lastBlockHash = block.Hash()
}
return false, 0, lastBlockHash, blocks, nil
}
func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, []*orm.CrossMessage, error) {
var l2RevertedUserTxs []*orm.CrossMessage
var l2RevertedRelayedMessageTxs []*orm.CrossMessage
blockTimestampsMap := make(map[uint64]uint64)
for i := from; i <= to; i++ {
block := blocks[i-from]
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
txTo := tx.To()
if txTo == nil {
continue
}
toAddress := txTo.String()
// GatewayRouter: L2 withdrawal.
if toAddress == f.cfg.GatewayRouterAddr {
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, nil, receiptErr
}
// Check if the transaction is failed
if receipt.Status == types.ReceiptStatusFailed {
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, signerErr := signer.Sender(tx)
if signerErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
return nil, nil, nil, signerErr
}
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
L2TxHash: tx.Hash().String(),
MessageType: int(orm.MessageTypeL2SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L2BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
})
}
}
if tx.Type() == types.L1MessageTxType {
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, nil, receiptErr
}
// Check if the transaction is failed
if receipt.Status == types.ReceiptStatusFailed {
l2RevertedRelayedMessageTxs = append(l2RevertedRelayedMessageTxs, &orm.CrossMessage{
MessageHash: common.BytesToHash(crypto.Keccak256(tx.AsL1MessageTx().Data)).String(),
L2TxHash: tx.Hash().String(),
TxStatus: int(orm.TxStatusTypeRelayTxReverted),
L2BlockNumber: receipt.BlockNumber.Uint64(),
MessageType: int(orm.MessageTypeL1SentMessage),
})
}
}
}
}
return blockTimestampsMap, l2RevertedUserTxs, l2RevertedRelayedMessageTxs, nil
}
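// l2FetcherLogs filters logs emitted by the configured addresses in [from, to] (both inclusive), matching the
// withdraw ETH/ERC20/ERC721/ERC1155, SentMessage, RelayedMessage and FailedRelayedMessage event signatures.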
func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
query := ethereum.FilterQuery{
FromBlock: new(big.Int).SetUint64(from), // inclusive
ToBlock: new(big.Int).SetUint64(to), // inclusive
Addresses: f.addressList,
Topics: make([][]common.Hash, 1),
}
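// Multiple hashes in a single topic position are matched as an OR, so a log carrying any of the seven event signatures passes the filter.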
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2WithdrawERC721Sig
query.Topics[0][3] = backendabi.L2WithdrawERC1155Sig
query.Topics[0][4] = backendabi.L2SentMessageEventSig
query.Topics[0][5] = backendabi.L2RelayedMessageEventSig
query.Topics[0][6] = backendabi.L2FailedRelayedMessageEventSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
log.Error("Failed to filter L2 event logs", "from", from, "to", to, "err", err)
return nil, err
}
return eventLogs, nil
}
// L2Fetcher fetches L2 events and reverted txs in the block range [from, to] and detects reorgs.
func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, *L2FilterResult, error) {
log.Info("fetch and save L2 events", "from", from, "to", to)
isReorg, reorgHeight, blockHash, blocks, getErr := f.getBlocksAndDetectReorg(ctx, from, to, lastBlockHash)
if getErr != nil {
log.Error("L2Fetcher getBlocksAndDetectReorg failed", "from", from, "to", to, "error", getErr)
return false, 0, common.Hash{}, nil, getErr
}
if isReorg {
return isReorg, reorgHeight, blockHash, nil, nil
}
blockTimestampsMap, revertedUserTxs, revertedRelayMsgs, routerErr := f.getRevertedTxs(ctx, from, to, blocks)
if routerErr != nil {
log.Error("L2Fetcher getRevertedTxs failed", "from", from, "to", to, "error", routerErr)
return false, 0, common.Hash{}, nil, routerErr
}
eventLogs, err := f.l2FetcherLogs(ctx, from, to)
if err != nil {
log.Error("L2Fetcher l2FetcherLogs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
res := L2FilterResult{
WithdrawMessages: l2WithdrawMessages,
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
OtherRevertedTxs: revertedUserTxs,
}
f.updateMetrics(res)
return false, 0, blockHash, &res, nil
}
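// updateMetrics increments the fetched-total counters: one per failed gateway router tx, per withdrawal token type, and per relayed-message status.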
func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_gateway_router_transaction").Add(float64(len(res.OtherRevertedTxs)))
for _, withdrawMessage := range res.WithdrawMessages {
switch orm.TokenType(withdrawMessage.TokenType) {
case orm.TokenTypeETH:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_eth").Add(1)
case orm.TokenTypeERC20:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc20").Add(1)
case orm.TokenTypeERC721:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc721").Add(1)
case orm.TokenTypeERC1155:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch orm.TxStatusType(relayedMessage.TxStatus) {
case orm.TxStatusTypeRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_relayed_message").Add(1)
case orm.TxStatusTypeFailedRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_relayed_message").Add(1)
case orm.TxStatusTypeRelayTxReverted:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_reverted_relayed_message_transaction").Add(1)
}
}
}

View File

@@ -1,141 +0,0 @@
package orm
import (
"context"
"fmt"
"time"
"gorm.io/gorm"
)
// BatchStatusType represents the type of batch status.
type BatchStatusType int
// Constants for BatchStatusType.
const (
BatchStatusTypeUnknown BatchStatusType = iota
BatchStatusTypeCommitted
BatchStatusTypeReverted
BatchStatusTypeFinalized
)
// UpdateStatusType represents whether the batch info has been updated in the message table.
type UpdateStatusType int
// Constants for UpdateStatusType.
const (
UpdateStatusTypeUnupdated UpdateStatusType = iota
UpdateStatusTypeUpdated
)
// BatchEvent represents a batch event.
type BatchEvent struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id;primary_key"`
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
BatchStatus int `json:"batch_status" gorm:"column:batch_status"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
BatchHash string `json:"batch_hash" gorm:"column:batch_hash"`
StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
UpdateStatus int `json:"update_status" gorm:"column:update_status"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the BatchEvent model.
func (*BatchEvent) TableName() string {
return "batch_event_v2"
}
// NewBatchEvent returns a new instance of BatchEvent.
func NewBatchEvent(db *gorm.DB) *BatchEvent {
return &BatchEvent{db: db}
}
// GetBatchEventSyncedHeightInDB returns the maximum l1_block_number from the batch_event_v2 table.
func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64, error) {
var batch BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Order("l1_block_number desc")
if err := db.First(&batch).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
}
return batch.L1BlockNumber, nil
}
// GetFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
var batches []*BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("end_block_number <= ?", blockHeight)
db = db.Where("batch_status = ?", BatchStatusTypeFinalized)
db = db.Where("update_status = ?", UpdateStatusTypeUnupdated)
db = db.Order("batch_index asc")
if err := db.Find(&batches).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get batches >= block height, error: %w", err)
}
return batches, nil
}
// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent, dbTX ...*gorm.DB) error {
for _, l1BatchEvent := range l1BatchEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&BatchEvent{})
updateFields := make(map[string]interface{})
switch BatchStatusType(l1BatchEvent.BatchStatus) {
case BatchStatusTypeCommitted:
if err := db.Create(l1BatchEvent).Error; err != nil {
return fmt.Errorf("failed to insert batch event, error: %w", err)
}
case BatchStatusTypeFinalized:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = BatchStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
case BatchStatusTypeReverted:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = BatchStatusTypeReverted
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
// Soft delete the batch event.
if err := db.Delete(l1BatchEvent).Error; err != nil {
return fmt.Errorf("failed to soft delete batch event, error: %w", err)
}
}
}
return nil
}
// UpdateBatchEventStatus updates the UpdateStatusType of a BatchEvent given its batch index.
func (c *BatchEvent) UpdateBatchEventStatus(ctx context.Context, batchIndex uint64) error {
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("batch_index = ?", batchIndex)
updateFields := map[string]interface{}{
"update_status": UpdateStatusTypeUpdated,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event status, batchIndex: %d, error: %w", batchIndex, err)
}
return nil
}

View File

@@ -1,545 +0,0 @@
package orm
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
"github.com/scroll-tech/go-ethereum/common"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// TokenType represents the type of token.
type TokenType int
// Constants for TokenType.
const (
TokenTypeUnknown TokenType = iota
TokenTypeETH
TokenTypeERC20
TokenTypeERC721
TokenTypeERC1155
)
// MessageType represents the type of message.
type MessageType int
// Constants for MessageType.
const (
MessageTypeUnknown MessageType = iota
MessageTypeL1SentMessage
MessageTypeL2SentMessage
)
// TxStatusType represents the status of a transaction.
type TxStatusType int
// Constants for TxStatusType.
const (
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
// from a later status (e.g., relayed) back to "sent".
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // Message hash is not tracked, thus it will not be processed again.
TxStatusTypeRelayed // Terminal status.
// FailedRelayedMessage event: encoded tx failed, cannot retry. e.g., https://sepolia.scrollscan.com/tx/0xfc7d3ea5ec8dc9b664a5a886c3b33d21e665355057601033481a439498efb79a
TxStatusTypeFailedRelayed // Terminal status.
// In some cases, the user can retry with a larger gas limit. e.g., https://sepolia.scrollscan.com/tx/0x7323a7ba29492cb47d92206411be99b27896f2823cee0633a596b646b73f1b5b
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
)
// RollupStatusType represents the status of a rollup.
type RollupStatusType int
// Constants for RollupStatusType.
const (
RollupStatusTypeUnknown RollupStatusType = iota
RollupStatusTypeFinalized // only batch finalized status is used.
)
// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int
// Constants for MessageQueueEventType.
const (
MessageQueueEventTypeUnknown MessageQueueEventType = iota
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
)
// MessageQueueEvent represents the details of a message queue event.
type MessageQueueEvent struct {
EventType MessageQueueEventType
QueueIndex uint64
// TxHash tracks the replay tx hash or the refund tx hash.
TxHash common.Hash
// MessageHash is only set for QueueTransaction events from replayMessage, to track which message is replayed.
MessageHash common.Hash
}
// CrossMessage represents a cross message.
type CrossMessage struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id;primary_key"`
MessageType int `json:"message_type" gorm:"column:message_type"`
RollupStatus int `json:"rollup_status" gorm:"column:rollup_status"`
TxStatus int `json:"tx_status" gorm:"column:tx_status"`
TokenType int `json:"token_type" gorm:"column:token_type"`
Sender string `json:"sender" gorm:"column:sender"`
Receiver string `json:"receiver" gorm:"column:receiver"`
MessageHash string `json:"message_hash" gorm:"column:message_hash"`
L1TxHash string `json:"l1_tx_hash" gorm:"column:l1_tx_hash"` // initial tx hash, if MessageType is MessageTypeL1SentMessage.
L1ReplayTxHash string `json:"l1_replay_tx_hash" gorm:"column:l1_replay_tx_hash"`
L1RefundTxHash string `json:"l1_refund_tx_hash" gorm:"column:l1_refund_tx_hash"`
L2TxHash string `json:"l2_tx_hash" gorm:"column:l2_tx_hash"` // initial tx hash, if MessageType is MessageTypeL2SentMessage.
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
L2BlockNumber uint64 `json:"l2_block_number" gorm:"column:l2_block_number"`
L1TokenAddress string `json:"l1_token_address" gorm:"column:l1_token_address"`
L2TokenAddress string `json:"l2_token_address" gorm:"column:l2_token_address"`
TokenIDs string `json:"token_ids" gorm:"column:token_ids"`
TokenAmounts string `json:"token_amounts" gorm:"column:token_amounts"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"column:block_timestamp"`
MessageFrom string `json:"message_from" gorm:"column:message_from"`
MessageTo string `json:"message_to" gorm:"column:message_to"`
MessageValue string `json:"message_value" gorm:"column:message_value"`
MessageNonce uint64 `json:"message_nonce" gorm:"column:message_nonce"`
MessageData string `json:"message_data" gorm:"column:message_data"`
MerkleProof []byte `json:"merkle_proof" gorm:"column:merkle_proof"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the CrossMessage model.
func (*CrossMessage) TableName() string {
return "cross_message_v2"
}
// NewCrossMessage returns a new instance of CrossMessage.
func NewCrossMessage(db *gorm.DB) *CrossMessage {
return &CrossMessage{db: db}
}
// GetMessageSyncedHeightInDB returns the latest synced cross message height from the database for a given message type.
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType MessageType) (uint64, error) {
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", messageType)
switch {
case messageType == MessageTypeL1SentMessage:
db = db.Order("l1_block_number desc")
case messageType == MessageTypeL2SentMessage:
db = db.Order("l2_block_number desc")
}
if err := db.First(&message).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
}
switch {
case messageType == MessageTypeL1SentMessage:
return message.L1BlockNumber, nil
case messageType == MessageTypeL2SentMessage:
return message.L2BlockNumber, nil
default:
return 0, fmt.Errorf("invalid message type: %v", messageType)
}
}
// GetL2LatestFinalizedWithdrawal returns the latest finalized L2 withdrawal from the database.
func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*CrossMessage, error) {
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("rollup_status = ?", RollupStatusTypeFinalized)
db = db.Order("message_nonce desc")
if err := db.First(&message).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
}
return &message, nil
}
// GetL2WithdrawalsByBlockRange returns the L2 withdrawals by block range from the database.
func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBlock, endBlock uint64) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("l2_block_number >= ?", startBlock)
db = db.Where("l2_block_number <= ?", endBlock)
db = db.Where("tx_status != ?", TxStatusTypeSentTxReverted)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Order("message_nonce asc")
if err := db.Find(&messages).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
}
return messages, nil
}
// GetMessagesByTxHashes retrieves all cross messages from the database that match the provided transaction hashes.
func (c *CrossMessage) GetMessagesByTxHashes(ctx context.Context, txHashes []string) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("l1_tx_hash in (?) or l2_tx_hash in (?)", txHashes, txHashes)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 messages by tx hashes, tx hashes: %v, error: %w", txHashes, err)
}
return messages, nil
}
// GetL2UnclaimedWithdrawalsByAddress retrieves all L2 unclaimed withdrawal messages for a given sender address.
func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", TxStatusTypeSent)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// GetL2WithdrawalsByAddress retrieves all L2 withdrawal messages for a given sender address.
func (c *CrossMessage) GetL2WithdrawalsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 withdrawal messages by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// GetTxsByAddress retrieves all txs for a given sender address.
func (c *CrossMessage) GetTxsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get all txs by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// UpdateL1MessageQueueEventsInfo updates the information about L1 message queue events in the database.
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent, dbTX ...*gorm.DB) error {
// update tx statuses.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeFailedRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
txStatusUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case MessageQueueEventTypeQueueTransaction:
// Only replayMessages or enforced txs (whose message hashes would not be found) reach here; sentMessages have been filtered out.
// replayMessage case:
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
//
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
// because in replayMessage, queue index != message nonce.
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txStatusUpdateFields["tx_status"] = TxStatusTypeSent // reset status to "sent".
case MessageQueueEventTypeDequeueTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
case MessageQueueEventTypeDropTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
}
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
}
}
// update tx hashes of replay and refund.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case MessageQueueEventTypeQueueTransaction:
// Only replayMessages or enforced txs (whose message hashes would not be found) reach here; sentMessages have been filtered out.
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
case MessageQueueEventTypeDropTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
}
// Check if there are fields to update to avoid empty update operation (skip message).
if len(txHashUpdateFields) > 0 {
if err := db.Updates(txHashUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
}
}
}
return nil
}
// UpdateBatchStatusOfL2Withdrawals updates batch status of L2 withdrawals.
func (c *CrossMessage) UpdateBatchStatusOfL2Withdrawals(ctx context.Context, startBlockNumber, endBlockNumber, batchIndex uint64) error {
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("l2_block_number >= ?", startBlockNumber)
db = db.Where("l2_block_number <= ?", endBlockNumber)
updateFields := make(map[string]interface{})
updateFields["batch_index"] = batchIndex
updateFields["rollup_status"] = RollupStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch status of L2 sent messages, start: %v, end: %v, index: %v, error: %w", startBlockNumber, endBlockNumber, batchIndex, err)
}
return nil
}
// UpdateBatchIndexRollupStatusMerkleProofOfL2Messages updates the batch_index, rollup_status, and merkle_proof fields for a list of L2 cross messages.
func (c *CrossMessage) UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx context.Context, messages []*CrossMessage) error {
if len(messages) == 0 {
return nil
}
for _, message := range messages {
updateFields := map[string]interface{}{
"batch_index": message.BatchIndex,
"rollup_status": message.RollupStatus,
"merkle_proof": message.MerkleProof,
}
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_hash = ?", message.MessageHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update L2 message with message_hash %s, error: %w", message.MessageHash, err)
}
}
return nil
}
// InsertOrUpdateL1Messages inserts or updates a list of L1 cross messages into the database.
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l1_block_number", "l1_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_nonce"}),
})
if err := db.Create(messages).Error; err != nil {
return fmt.Errorf("failed to insert message, error: %w", err)
}
return nil
}
// InsertOrUpdateL2Messages inserts or updates a list of L2 cross messages into the database.
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
// The merkle_proof is updated separately in batch status updates and hence is not included here.
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l2_block_number", "l2_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_from", "message_to", "message_value", "message_data", "message_nonce"}),
})
if err := db.Create(messages).Error; err != nil {
return fmt.Errorf("failed to insert message, error: %w", err)
}
return nil
}
// InsertFailedGatewayRouterTxs inserts a list of transactions that failed to interact with the gateway router into the database.
// These failed transactions are only fetched once, so they are inserted without checking for duplicates.
// To avoid unique index conflicts, a random UUID is generated and used as the MessageHash.
func (c *CrossMessage) InsertFailedGatewayRouterTxs(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
for _, message := range messages {
message.MessageHash = uuid.New().String()
}
if err := db.Create(messages).Error; err != nil {
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
}
return nil
}
// InsertOrUpdateL2RelayedMessagesOfL1Deposits inserts or updates the database with a list of L2 relayed messages related to L1 deposits.
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(l2RelayedMessages) == 0 {
return nil
}
// Deduplicate messages: for each message_hash, retain the message with the highest block number.
// This is necessary as a single message, like a FailedRelayedMessage or a reverted relayed transaction,
// may be relayed multiple times within certain block ranges, potentially leading to the error:
// "ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time (SQLSTATE 21000)".
// This happens if we attempt to insert multiple records with the same message_hash in a single db.Create operation.
// For example, see these transactions where the same message was relayed twice within certain block ranges:
// Reverted tx 1: https://sepolia.scrollscan.com/tx/0xcd6979277c3bc747445273a5e58ef1e9692fbe101d88cfefbbb69d3aef3193c0
// Reverted tx 2: https://sepolia.scrollscan.com/tx/0x43e28ed7cb71107c18c5d8ebbdb4a1d9cac73e60391d14d41e92985028faa337
// Another example:
// FailedRelayedMessage 1: https://sepolia.scrollscan.com/tx/0xfadb147fb211e5096446c5cac3ae0a8a705d2ece6c47c65135c8874f84638f17
// FailedRelayedMessage 2: https://sepolia.scrollscan.com/tx/0x6cb149b61afd07bf2e17561a59ebebde41e343b6610290c97515b2f862160b42
mergedL2RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l2RelayedMessages {
if existing, found := mergedL2RelayedMessages[message.MessageHash]; found {
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
mergedL2RelayedMessages[message.MessageHash] = message
}
} else {
mergedL2RelayedMessages[message.MessageHash] = message
}
}
uniqueL2RelayedMessages := make([]*CrossMessage, 0, len(mergedL2RelayedMessages))
for _, msg := range mergedL2RelayedMessages {
uniqueL2RelayedMessages = append(uniqueL2RelayedMessages, msg)
}
// Do not update the tx status of messages that were already successfully relayed or failed-relayed,
// because once a message has been handled, any later relay attempt would be reverted.
// ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L2/L2ScrollMessenger.sol#L102
// e.g.,
// Successfully relayed: https://sepolia.scrollscan.com/tx/0x4eb7cb07ba76956259c0079819a34a146f8a93dd891dc94812e9b3d66b056ec7#eventlog
// Reverted tx 1 (Reason: Message was already successfully executed): https://sepolia.scrollscan.com/tx/0x1973cafa14eb40734df30da7bfd4d9aceb53f8f26e09d96198c16d0e2e4a95fd
// Reverted tx 2 (Reason: Message was already successfully executed): https://sepolia.scrollscan.com/tx/0x02fc3a28684a590aead2482022f56281539085bd3d273ac8dedc1ceccb2bc554
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"message_type", "l2_block_number", "l2_tx_hash", "tx_status"}),
Where: clause.Where{
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
),
},
},
})
if err := db.Create(uniqueL2RelayedMessages).Error; err != nil {
return fmt.Errorf("failed to update L2 reverted relayed message of L1 deposit, error: %w", err)
}
return nil
}
// InsertOrUpdateL1RelayedMessagesOfL2Withdrawals inserts or updates the database with a list of L1 relayed messages related to L2 withdrawals.
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(l1RelayedMessages) == 0 {
return nil
}
// Deduplicate messages: for each message_hash, retain the message with the highest block number.
// This is necessary as a single message, like a FailedRelayedMessage or a reverted relayed transaction,
// may be relayed multiple times within certain block ranges, potentially leading to the error:
// "ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time (SQLSTATE 21000)".
// This happens if we attempt to insert multiple records with the same message_hash in a single db.Create operation.
// For example, see these transactions where the same message was relayed twice within certain block ranges:
// FailedRelayedMessage 1: https://sepolia.etherscan.io/tx/0x28b3212cda6ca0f3790f362a780257bbe2b37417ccf75a4eca6c3a08294c8f1b#eventlog
// FailedRelayedMessage 2: https://sepolia.etherscan.io/tx/0xc8a8254825dd2cab5caef58cfd8d88c077ceadadc78f2340214a86cf8ab88543#eventlog
// Another example (relayed success, then relayed again):
// Relay Message, and success: https://sepolia.etherscan.io/tx/0xcfdf2f5446719e3e123a8aa06e4d6b3809c3850a13adf875755c8b1e423aa448#eventlog
// Relay Message again, and reverted: https://sepolia.etherscan.io/tx/0xb1fcae7546f3de4cfd0b4d679f4075adb4eb69578b12e2b5673f5f24b1836578
mergedL1RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l1RelayedMessages {
if existing, found := mergedL1RelayedMessages[message.MessageHash]; found {
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
mergedL1RelayedMessages[message.MessageHash] = message
}
} else {
mergedL1RelayedMessages[message.MessageHash] = message
}
}
uniqueL1RelayedMessages := make([]*CrossMessage, 0, len(mergedL1RelayedMessages))
for _, msg := range mergedL1RelayedMessages {
uniqueL1RelayedMessages = append(uniqueL1RelayedMessages, msg)
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"message_type", "l1_block_number", "l1_tx_hash", "tx_status"}),
Where: clause.Where{
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
),
},
},
})
if err := db.Create(uniqueL1RelayedMessages).Error; err != nil {
return fmt.Errorf("failed to update L1 relayed message of L2 withdrawal, error: %w", err)
}
return nil
}

View File

@@ -1,57 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE cross_message_v2
(
id BIGSERIAL PRIMARY KEY,
message_type SMALLINT NOT NULL,
tx_status SMALLINT NOT NULL,
rollup_status SMALLINT NOT NULL,
token_type SMALLINT NOT NULL,
sender VARCHAR NOT NULL,
receiver VARCHAR NOT NULL,
message_hash VARCHAR DEFAULT NULL, -- NULL for failed txs
l1_tx_hash VARCHAR DEFAULT NULL,
l1_replay_tx_hash VARCHAR DEFAULT NULL,
l1_refund_tx_hash VARCHAR DEFAULT NULL,
l2_tx_hash VARCHAR DEFAULT NULL,
l1_block_number BIGINT DEFAULT NULL,
l2_block_number BIGINT DEFAULT NULL,
l1_token_address VARCHAR DEFAULT NULL,
l2_token_address VARCHAR DEFAULT NULL,
token_ids VARCHAR DEFAULT NULL,
token_amounts VARCHAR NOT NULL,
block_timestamp BIGINT NOT NULL, -- timestamp used to sort L1 deposit & L2 withdraw events together
--- claim info
message_from VARCHAR DEFAULT NULL,
message_to VARCHAR DEFAULT NULL,
message_value VARCHAR DEFAULT NULL,
message_nonce BIGINT DEFAULT NULL,
message_data VARCHAR DEFAULT NULL,
merkle_proof BYTEA DEFAULT NULL,
batch_index BIGINT DEFAULT NULL,
-- metadata
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_cm_message_hash ON cross_message_v2 (message_hash);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_l1_block_number ON cross_message_v2 (message_type, l1_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_l2_block_number ON cross_message_v2 (message_type, l2_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_rollup_status_message_nonce ON cross_message_v2 (message_type, rollup_status, message_nonce DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_message_nonce_tx_status_l2_block_number ON cross_message_v2 (message_type, message_nonce, tx_status, l2_block_number);
CREATE INDEX IF NOT EXISTS idx_cm_l1_tx_hash ON cross_message_v2 (l1_tx_hash);
CREATE INDEX IF NOT EXISTS idx_cm_l2_tx_hash ON cross_message_v2 (l2_tx_hash);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_tx_status_sender_block_timestamp ON cross_message_v2 (message_type, tx_status, sender, block_timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_sender_block_timestamp ON cross_message_v2 (message_type, sender, block_timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_cm_sender_block_timestamp ON cross_message_v2 (sender, block_timestamp DESC);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS cross_message_v2;
-- +goose StatementEnd

View File

@@ -1,28 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE batch_event_v2
(
id BIGSERIAL PRIMARY KEY,
l1_block_number BIGINT NOT NULL,
batch_status SMALLINT NOT NULL,
batch_index BIGINT NOT NULL,
batch_hash VARCHAR NOT NULL,
start_block_number BIGINT NOT NULL,
end_block_number BIGINT NOT NULL,
update_status SMALLINT NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE INDEX IF NOT EXISTS idx_be_l1_block_number ON batch_event_v2 (l1_block_number);
CREATE INDEX IF NOT EXISTS idx_be_batch_index ON batch_event_v2 (batch_index);
CREATE INDEX IF NOT EXISTS idx_be_batch_index_batch_hash ON batch_event_v2 (batch_index, batch_hash);
CREATE INDEX IF NOT EXISTS idx_be_end_block_number_update_status_batch_status_batch_index ON batch_event_v2 (end_block_number, update_status, batch_status, batch_index);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS batch_event_v2;
-- +goose StatementEnd

View File

@@ -1,35 +0,0 @@
package route
import (
"time"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"scroll-tech/common/observability"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/controller/api"
)
// Route registers the API routes.
func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
router.Use(cors.New(cors.Config{
AllowOrigins: []string{"*"},
AllowMethods: []string{"GET", "POST"},
AllowHeaders: []string{"Origin", "Content-Type", "Authorization"},
AllowCredentials: true,
MaxAge: 12 * time.Hour,
}))
observability.Use(router, "bridge_history_api", reg)
r := router.Group("api/")
r.GET("/txs", api.HistoryCtrler.GetTxsByAddress)
r.GET("/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
r.GET("/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
r.POST("/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
}

View File

@@ -1,132 +0,0 @@
package types
import (
"net/http"
"github.com/gin-gonic/gin"
"scroll-tech/bridge-history-api/internal/orm"
)
const (
// Success indicates that the operation was successful.
Success = 0
// InternalServerError represents a fatal error occurring on the server.
InternalServerError = 500
// ErrParameterInvalidNo represents an error when the parameters are invalid.
ErrParameterInvalidNo = 40001
// ErrGetL2ClaimableWithdrawalsError represents an error when trying to get L2 claimable withdrawal transactions.
ErrGetL2ClaimableWithdrawalsError = 40002
// ErrGetL2WithdrawalsError represents an error when trying to get L2 withdrawal transactions by address.
ErrGetL2WithdrawalsError = 40003
// ErrGetTxsError represents an error when trying to get transactions by address.
ErrGetTxsError = 40004
// ErrGetTxsByHashError represents an error when trying to get transactions by hash list.
ErrGetTxsByHashError = 40005
)
// QueryByAddressRequest is the request parameter of the address API
type QueryByAddressRequest struct {
Address string `form:"address" binding:"required"`
Page uint64 `form:"page" binding:"required,min=1"`
PageSize uint64 `form:"page_size" binding:"required,min=1,max=100"`
}
// QueryByHashRequest is the request parameter of the hash API
type QueryByHashRequest struct {
Txs []string `json:"txs" binding:"required,min=1,max=100"`
}
// ResultData contains the returned txs and the total count
type ResultData struct {
Results []*TxHistoryInfo `json:"results"`
Total uint64 `json:"total"`
}
// Response is the response schema
type Response struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data interface{} `json:"data"`
}
// CounterpartChainTx is the schema of counterpart chain tx info
type CounterpartChainTx struct {
Hash string `json:"hash"`
BlockNumber uint64 `json:"block_number"`
}
// ClaimInfo is the schema of tx claim info
type ClaimInfo struct {
From string `json:"from"`
To string `json:"to"`
Value string `json:"value"`
Nonce string `json:"nonce"`
Message string `json:"message"`
Proof L2MessageProof `json:"proof"`
Claimable bool `json:"claimable"`
}
// L2MessageProof is the schema of L2 message proof
type L2MessageProof struct {
BatchIndex string `json:"batch_index"`
MerkleProof string `json:"merkle_proof"`
}
// TxHistoryInfo is the schema of tx history info
type TxHistoryInfo struct {
Hash string `json:"hash"`
ReplayTxHash string `json:"replay_tx_hash"`
RefundTxHash string `json:"refund_tx_hash"`
MessageHash string `json:"message_hash"`
TokenType orm.TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
TokenIDs []string `json:"token_ids"` // only for erc721 and erc1155
TokenAmounts []string `json:"token_amounts"` // for eth and erc20, the length is 1, for erc721 and erc1155, the length could be > 1
MessageType orm.MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
L1TokenAddress string `json:"l1_token_address"`
L2TokenAddress string `json:"l2_token_address"`
BlockNumber uint64 `json:"block_number"`
TxStatus orm.TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
CounterpartChainTx *CounterpartChainTx `json:"counterpart_chain_tx"`
ClaimInfo *ClaimInfo `json:"claim_info"`
BlockTimestamp uint64 `json:"block_timestamp"`
}
// RenderJSON renders response with json
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: errCode,
ErrMsg: errMsg,
Data: data,
}
ctx.JSON(http.StatusOK, renderData)
}
// RenderSuccess renders success response with json
func RenderSuccess(ctx *gin.Context, data interface{}) {
RenderJSON(ctx, Success, nil, data)
}
// RenderFailure renders failure response with json
func RenderFailure(ctx *gin.Context, errCode int, err error) {
RenderJSON(ctx, errCode, err, nil)
}
// RenderFatal renders fatal response with json
func RenderFatal(ctx *gin.Context, err error) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: InternalServerError,
ErrMsg: errMsg,
Data: nil,
}
ctx.Set("errcode", InternalServerError)
ctx.JSON(http.StatusInternalServerError, renderData)
}

View File

@@ -1,185 +0,0 @@
package utils
import (
"context"
"encoding/binary"
"errors"
"fmt"
"math/big"
"strings"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
backendabi "scroll-tech/bridge-history-api/abi"
)
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values, i.e. keccak256(a || b)
func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
}
// GetBlockNumber gets the current block number minus the given number of confirmations
func GetBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
number, err := client.BlockNumber(ctx)
if err != nil || number <= confirmations {
return 0, err
}
number = number - confirmations
return number, nil
}
// UnpackLog unpacks a retrieved log into the provided output structure.
// @todo: add unit test.
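// Non-indexed fields are unpacked from log.Data; indexed fields are parsed from log.Topics[1:].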
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// ComputeMessageHash computes the cross-chain message hash
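// The result is the keccak256 of the ABI-encoded relayMessage(sender, target, value, messageNonce, message) calldata.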
func ComputeMessageHash(
sender common.Address,
target common.Address,
value *big.Int,
messageNonce *big.Int,
message []byte,
) common.Hash {
data, _ := backendabi.IL2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
return common.BytesToHash(crypto.Keccak256(data))
}
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
// GetBatchRangeFromCalldata finds the block range from the commitBatch calldata, both ends inclusive.
func GetBatchRangeFromCalldata(calldata []byte) (uint64, uint64, error) {
method := backendabi.IScrollChainABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
// special case: import genesis batch
method = backendabi.IScrollChainABI.Methods["importGenesisBatch"]
_, err2 := method.Inputs.Unpack(calldata[4:])
if err2 == nil {
// genesis batch
return 0, 0, nil
}
// none of "commitBatch" and "importGenesisBatch" match, give up
return 0, 0, err
}
args := commitBatchArgs{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return 0, 0, err
}
var startBlock uint64
var finishBlock uint64
// decode blocks from chunk and assume that there's no empty chunk
// | 1 byte | 60 bytes | ... | 60 bytes |
// | num blocks | block 1 | ... | block n |
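// Within each 60-byte block, the first 8 bytes hold the big-endian block number.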
if len(args.Chunks) == 0 {
return 0, 0, errors.New("invalid chunks")
}
chunk := args.Chunks[0]
block := chunk[1:61] // first block in chunk
startBlock = binary.BigEndian.Uint64(block[0:8])
chunk = args.Chunks[len(args.Chunks)-1]
lastBlockIndex := int(chunk[0]) - 1
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
finishBlock = binary.BigEndian.Uint64(block[0:8])
return startBlock, finishBlock, err
}
// GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
func GetBlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.Block, error) {
var (
eg errgroup.Group
blocks = make([]*types.Block, end-start+1)
concurrency = 32
sem = make(chan struct{}, concurrency)
)
for i := start; i <= end; i++ {
sem <- struct{}{} // Acquire a slot in the semaphore
blockNum := int64(i)
index := i - start
eg.Go(func() error {
defer func() { <-sem }() // Release the slot when done
block, err := cli.BlockByNumber(ctx, big.NewInt(blockNum))
if err != nil {
log.Error("Failed to fetch block number", "number", blockNum, "error", err)
return err
}
blocks[index] = block
return nil
})
}
if err := eg.Wait(); err != nil {
log.Error("Error waiting for block fetching routines", "error", err)
return nil, err
}
return blocks, nil
}
// ConvertBigIntArrayToString converts the big int array to a comma-separated string
func ConvertBigIntArrayToString(array []*big.Int) string {
stringArray := make([]string, len(array))
for i, num := range array {
stringArray[i] = num.String()
}
result := strings.Join(stringArray, ", ")
return result
}
// ConvertStringToStringArray takes a string with values separated by commas and returns a slice of strings
func ConvertStringToStringArray(s string) []string {
if s == "" {
return []string{}
}
stringParts := strings.Split(s, ",")
for i, part := range stringParts {
stringParts[i] = strings.TrimSpace(part)
}
return stringParts
}
// GetSkippedQueueIndices decodes the skipped queue indices from the bitmap, offset by startIndex
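// For example, startIndex=10 with skippedBitmap=0b110 yields indices {11, 12}, since bits 1 and 2 are set.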
func GetSkippedQueueIndices(startIndex uint64, skippedBitmap *big.Int) []uint64 {
var indices []uint64
for i := 0; i < 256; i++ {
index := startIndex + uint64(i)
bit := new(big.Int).Rsh(skippedBitmap, uint(i))
if bit.Bit(0) == 0 {
continue
}
indices = append(indices, index)
}
return indices
}

View File

@@ -1,91 +0,0 @@
package utils
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
a := common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0")
b := common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c")
c := Keccak2(a, b)
assert.NotEmpty(t, c)
assert.NotEqual(t, a, c)
assert.NotEqual(t, b, c)
assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
}
func TestGetBatchRangeFromCalldata(t *testing.T) {
// single chunk
start, finish, err := GetBatchRangeFromCalldata(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003d0100000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000100000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000"))
assert.NoError(t, err)
assert.Equal(t, start, uint64(1))
assert.Equal(t, finish, uint64(1))
// multiple chunk
start, finish, err = GetBatchRangeFromCalldata(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000007900000000000000000100000000000000010000000000000001038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000004c01000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000010000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b403000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000300000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00050000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c01000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa"))
assert.NoError(t, err)
assert.Equal(t, start, uint64(10))
assert.Equal(t, finish, uint64(20))
// genesis batch
start, finish, err = GetBatchRangeFromCalldata(common.Hex2Bytes("3fdeecb200000000000000000000000000000000000000000000000000000000000000402dcb5308098d24a37fc1487a229fcedb09fa4343ede39cbad365bc925535bb09000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000c252bc9780c4d83cf11f14b8cd03c92c4d18ce07710ba836d31d12da216c8330000000000000000000000000000000000000000000000000000000000000000000000000000000"))
assert.NoError(t, err)
assert.Equal(t, start, uint64(0))
assert.Equal(t, finish, uint64(0))
}
// TestConvertBigIntArrayToString tests the ConvertBigIntArrayToString function
func TestConvertBigIntArrayToString(t *testing.T) {
tests := []struct {
array []*big.Int
expected string
}{
{[]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, "1, 2, 3"},
{[]*big.Int{big.NewInt(0), big.NewInt(-1)}, "0, -1"},
{[]*big.Int{}, ""},
}
for _, test := range tests {
got := ConvertBigIntArrayToString(test.array)
assert.Equal(t, test.expected, got)
}
}
// TestConvertStringToStringArray tests the ConvertStringToStringArray function
func TestConvertStringToStringArray(t *testing.T) {
tests := []struct {
s string
expected []string
}{
{"1, 2, 3", []string{"1", "2", "3"}},
{" 4 , 5 , 6 ", []string{"4", "5", "6"}},
{"", []string{}},
}
for _, test := range tests {
got := ConvertStringToStringArray(test.s)
assert.Equal(t, test.expected, got)
}
}
// TestGetSkippedQueueIndices tests the GetSkippedQueueIndices function
func TestGetSkippedQueueIndices(t *testing.T) {
tests := []struct {
startIndex uint64
bitmap *big.Int
expected []uint64
}{
{0, big.NewInt(0b101), []uint64{0, 2}},
{10, big.NewInt(0b110), []uint64{11, 12}},
{0, big.NewInt(0), nil}, // No bits set
}
for _, test := range tests {
got := GetSkippedQueueIndices(test.startIndex, test.bitmap)
assert.Equal(t, test.expected, got)
}
}

View File

@@ -1,188 +0,0 @@
package utils
import (
"github.com/scroll-tech/go-ethereum/common"
)
// MaxHeight is the maximum possible height of the withdraw trie
const MaxHeight = 40
// WithdrawTrie is an append-only Merkle trie
type WithdrawTrie struct {
// used to rebuild the merkle tree
NextMessageNonce uint64
height int // current height of withdraw trie
branches []common.Hash // branches[i] caches, at height i, the hash of the latest left subtree root awaiting its right sibling
zeroes []common.Hash // zeroes[i] is the root hash of an empty subtree of height i
}
// NewWithdrawTrie will return a new instance of WithdrawTrie
func NewWithdrawTrie() *WithdrawTrie {
zeroes := make([]common.Hash, MaxHeight)
branches := make([]common.Hash, MaxHeight)
zeroes[0] = common.Hash{}
for i := 1; i < MaxHeight; i++ {
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
}
return &WithdrawTrie{
zeroes: zeroes,
branches: branches,
height: -1,
NextMessageNonce: 0,
}
}
// Initialize initializes the merkle trie from the rightmost leaf node and its merkle proof
func (w *WithdrawTrie) Initialize(currentMessageNonce uint64, msgHash common.Hash, proofBytes []byte) {
proof := decodeBytesToMerkleProof(proofBytes)
branches := recoverBranchFromProof(proof, currentMessageNonce, msgHash)
w.height = len(proof)
w.branches = branches
w.NextMessageNonce = currentMessageNonce + 1
}
// AppendMessages appends a list of new messages as leaf nodes to the rightmost side of the tree and returns the proofs for all messages.
// The function correctly returns the proofs for the entire tree after all messages have been inserted, not the individual proofs after each insertion.
func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
length := len(hashes)
if length == 0 {
return make([][]byte, 0)
}
cache := make([]map[uint64]common.Hash, MaxHeight)
for h := 0; h < MaxHeight; h++ {
cache[h] = make(map[uint64]common.Hash)
}
// cache all branches that will be used later.
if w.NextMessageNonce != 0 {
index := w.NextMessageNonce
for h := 0; h <= w.height; h++ {
if index%2 == 1 {
// right child, `w.branches[h]` is the corresponding left child
// the index of left child should be `index ^ 1`.
cache[h][index^1] = w.branches[h]
}
index >>= 1
}
}
// cache all new leaves
for i := 0; i < length; i++ {
cache[0][w.NextMessageNonce+uint64(i)] = hashes[i]
}
// build withdraw trie with new hashes
minIndex := w.NextMessageNonce
maxIndex := w.NextMessageNonce + uint64(length) - 1
for h := 0; maxIndex > 0; h++ {
if minIndex%2 == 1 {
minIndex--
}
if maxIndex%2 == 0 {
cache[h][maxIndex^1] = w.zeroes[h]
}
for i := minIndex; i <= maxIndex; i += 2 {
cache[h+1][i>>1] = Keccak2(cache[h][i], cache[h][i^1])
}
minIndex >>= 1
maxIndex >>= 1
}
// update branches using hashes one by one
for i := 0; i < length; i++ {
proof := updateBranchWithNewMessage(w.zeroes, w.branches, w.NextMessageNonce, hashes[i])
w.NextMessageNonce++
w.height = len(proof)
}
proofs := make([][]byte, length)
// retrieve merkle proof from cache
for i := 0; i < length; i++ {
index := w.NextMessageNonce + uint64(i) - uint64(length)
var merkleProof []common.Hash
for h := 0; h < w.height; h++ {
merkleProof = append(merkleProof, cache[h][index^1])
index >>= 1
}
proofs[i] = encodeMerkleProofToBytes(merkleProof)
}
return proofs
}
// MessageRoot returns the current root hash of the withdraw trie.
func (w *WithdrawTrie) MessageRoot() common.Hash {
if w.height == -1 {
return common.Hash{}
}
return w.branches[w.height]
}
// decodeBytesToMerkleProof converts a byte array to a bytes32 array. The caller should make sure the length is a multiple of 32.
func decodeBytesToMerkleProof(proofBytes []byte) []common.Hash {
proof := make([]common.Hash, len(proofBytes)/32)
for i := 0; i < len(proofBytes); i += 32 {
proof[i/32] = common.BytesToHash(proofBytes[i : i+32])
}
return proof
}
// encodeMerkleProofToBytes concatenates a bytes32 array into a byte array.
func encodeMerkleProofToBytes(proof []common.Hash) []byte {
var proofBytes []byte
for i := 0; i < len(proof); i++ {
proofBytes = append(proofBytes, proof[i][:]...)
}
return proofBytes
}
// updateBranchWithNewMessage updates the branches with the new message and returns the merkle proof for the message.
func updateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
root := msgHash
var merkleProof []common.Hash
var height uint64
for height = 0; index > 0; height++ {
if index%2 == 0 {
// it may be used in the next round.
branches[height] = root
merkleProof = append(merkleProof, zeroes[height])
// it's a left child, the right child must be null
root = Keccak2(root, zeroes[height])
} else {
// it's a right child, use previously computed hash
root = Keccak2(branches[height], root)
merkleProof = append(merkleProof, branches[height])
}
index >>= 1
}
branches[height] = root
return merkleProof
}
// recoverBranchFromProof recovers the latest branches from a merkle proof and message hash
func recoverBranchFromProof(proof []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
branches := make([]common.Hash, 64)
root := msgHash
var height uint64
for height = 0; index > 0; height++ {
if index%2 == 0 {
branches[height] = root
// it's a left child, the right child must be null
root = Keccak2(root, proof[height])
} else {
// it's a right child, use previously computed hash
branches[height] = proof[height]
root = Keccak2(proof[height], root)
}
index >>= 1
}
branches[height] = root
for height++; height < 64; height++ {
branches[height] = common.Hash{}
}
return branches
}
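
For orientation, here is a minimal usage sketch of the trie above. It assumes this `utils` package (including the `Keccak2` helper it relies on) is importable; the import path below is illustrative only, not taken from the diff.

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"

	"bridge/utils" // hypothetical import path for the package above
)

func main() {
	trie := utils.NewWithdrawTrie()

	// Append two messages in one call; proofs[i] is the merkle proof for
	// hashes[i] against the final root (after both messages are inserted).
	hashes := []common.Hash{
		common.BigToHash(common.Big1),
		common.BigToHash(common.Big2),
	}
	proofs := trie.AppendMessages(hashes)

	fmt.Println("root:", trie.MessageRoot().Hex())
	fmt.Println("proof for message 0:", common.Bytes2Hex(proofs[0]))
}
```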

View File

@@ -1,210 +0,0 @@
package utils
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestUpdateBranchWithNewMessage(t *testing.T) {
zeroes := make([]common.Hash, 64)
branches := make([]common.Hash, 64)
zeroes[0] = common.Hash{}
for i := 1; i < 64; i++ {
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
}
updateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
if branches[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") {
t.Fatalf("Invalid root, want %s, got %s", "0x0000000000000000000000000000000000000000000000000000000000000001", branches[0].Hex())
}
updateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if branches[1] != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid root, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", branches[1].Hex())
}
updateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
if branches[2] != common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c") {
t.Fatalf("Invalid root, want %s, got %s", "0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c", branches[2].Hex())
}
updateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
if branches[2] != common.HexToHash("0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36") {
t.Fatalf("Invalid root, want %s, got %s", "0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36", branches[2].Hex())
}
}
func TestDecodeEncodeMerkleProof(t *testing.T) {
proof := decodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
if len(proof) != 4 {
t.Fatalf("proof length mismatch, want %d, got %d", 4, len(proof))
}
if proof[0] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901") {
t.Fatalf("proof[0] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901", proof[0].Hex())
}
if proof[1] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902") {
t.Fatalf("proof[1] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902", proof[0].Hex())
}
if proof[2] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903") {
t.Fatalf("proof[2] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903", proof[0].Hex())
}
if proof[3] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904") {
t.Fatalf("proof[3] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904", proof[0].Hex())
}
bytes := encodeMerkleProofToBytes(proof)
if common.Bytes2Hex(bytes) != "2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904" {
t.Fatalf("wrong encoded bytes")
}
}
func TestRecoverBranchFromProof(t *testing.T) {
zeroes := make([]common.Hash, 64)
branches := make([]common.Hash, 64)
zeroes[0] = common.Hash{}
for i := 1; i < 64; i++ {
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
}
proof := updateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
tmpBranches := recoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = updateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
tmpBranches = recoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = updateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
tmpBranches = recoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = updateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
tmpBranches = recoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
}
func TestWithdrawTrieOneByOne(t *testing.T) {
for initial := 0; initial < 128; initial++ {
withdrawTrie := NewWithdrawTrie()
var hashes []common.Hash
for i := 0; i < initial; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
withdrawTrie.AppendMessages([]common.Hash{
hash,
})
}
for i := initial; i < 128; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
expectedRoot := computeMerkleRoot(hashes)
proofBytes := withdrawTrie.AppendMessages([]common.Hash{
hash,
})
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(i+1))
assert.Equal(t, expectedRoot.String(), withdrawTrie.MessageRoot().String())
proof := decodeBytesToMerkleProof(proofBytes[0])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoot.String(), verifiedRoot.String())
}
}
}
func TestWithdrawTrieMultiple(t *testing.T) {
var expectedRoots []common.Hash
{
var hashes []common.Hash
for i := 0; i < 128; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
expectedRoots = append(expectedRoots, computeMerkleRoot(hashes))
}
}
for initial := 0; initial < 100; initial++ {
var hashes []common.Hash
for i := 0; i < initial; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
}
for finish := initial; finish < 100; finish++ {
withdrawTrie := NewWithdrawTrie()
withdrawTrie.AppendMessages(hashes)
var newHashes []common.Hash
for i := initial; i <= finish; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
newHashes = append(newHashes, hash)
}
proofBytes := withdrawTrie.AppendMessages(newHashes)
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(finish+1))
assert.Equal(t, expectedRoots[finish].String(), withdrawTrie.MessageRoot().String())
for i := initial; i <= finish; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
proof := decodeBytesToMerkleProof(proofBytes[i-initial])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoots[finish].String(), verifiedRoot.String())
}
}
}
}
func verifyMerkleProof(index uint64, leaf common.Hash, proof []common.Hash) common.Hash {
root := leaf
for _, h := range proof {
if index%2 == 0 {
root = Keccak2(root, h)
} else {
root = Keccak2(h, root)
}
index >>= 1
}
return root
}
func computeMerkleRoot(hashes []common.Hash) common.Hash {
if len(hashes) == 0 {
return common.Hash{}
}
zeroHash := common.Hash{}
for {
if len(hashes) == 1 {
break
}
var newHashes []common.Hash
for i := 0; i < len(hashes); i += 2 {
if i+1 < len(hashes) {
newHashes = append(newHashes, Keccak2(hashes[i], hashes[i+1]))
} else {
newHashes = append(newHashes, Keccak2(hashes[i], zeroHash))
}
}
hashes = newHashes
zeroHash = Keccak2(zeroHash, zeroHash)
}
return hashes[0]
}

View File

@@ -0,0 +1,11 @@
package model
type QueryByAddressRequest struct {
Address string `url:"address"`
Offset int `url:"offset"`
Limit int `url:"limit"`
}
type QueryByHashRequest struct {
Txs []string `url:"txs"`
}

View File

@@ -0,0 +1,18 @@
package model
import "bridge-history-api/service"
type Data struct {
Result []*service.TxHistoryInfo `json:"result"`
Total int `json:"total"`
}
type QueryByAddressResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
}
type QueryByHashResponse struct {
Message string `json:"message"`
Data *Data `json:"data"`
}

View File

@@ -0,0 +1,141 @@
package service
import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"bridge-history-api/db"
"bridge-history-api/db/orm"
)
type Finalized struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
}
type TxHistoryInfo struct {
Hash string `json:"hash"`
Amount string `json:"amount"`
To string `json:"to"` // useless
IsL1 bool `json:"isL1"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
FinalizeTx *Finalized `json:"finalizeTx"`
CreatedAt *time.Time `json:"createdTime"`
}
// HistoryService defines the service interface for querying tx history.
type HistoryService interface {
GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, error)
GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
}
// NewHistoryService returns a HistoryService backed by the given db.
func NewHistoryService(db db.OrmFactory) HistoryService {
service := &historyBackend{db: db, prefix: "Scroll-Bridge-History-Server"}
return service
}
type historyBackend struct {
prefix string
db db.OrmFactory
}
func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory) {
relayed, err := db.GetRelayedMsgByHash(msgHash)
if err != nil {
log.Error("updateCrossTxHash failed", "error", err)
return
}
if relayed == nil {
return
}
if relayed.Layer1Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer1Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
if relayed.Layer2Hash != "" {
txInfo.FinalizeTx.Hash = relayed.Layer2Hash
txInfo.FinalizeTx.BlockNumber = relayed.Height
return
}
}
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, error) {
txHistories := make([]*TxHistoryInfo, 0)
result, err := h.db.GetCrossMsgsByAddressWithOffset(address.String(), offset, limit)
if err != nil {
return nil, err
}
for _, msg := range result {
txHistory := &TxHistoryInfo{
Hash: msg.MsgHash,
Amount: msg.Amount,
To: msg.Target,
IsL1: msg.MsgType == int(orm.Layer1Msg),
BlockNumber: msg.Height,
CreatedAt: msg.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
}
updateCrossTxHash(msg.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
}
return txHistories, nil
}
func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {
txHistories := make([]*TxHistoryInfo, 0)
for _, hash := range hashes {
l1result, err := h.db.GetL1CrossMsgByHash(common.HexToHash(hash))
if err != nil {
return nil, err
}
if l1result != nil {
txHistory := &TxHistoryInfo{
Hash: l1result.Layer1Hash,
Amount: l1result.Amount,
To: l1result.Target,
IsL1: true,
BlockNumber: l1result.Height,
CreatedAt: l1result.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
}
updateCrossTxHash(l1result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
l2result, err := h.db.GetL2CrossMsgByHash(common.HexToHash(hash))
if err != nil {
return nil, err
}
if l2result != nil {
txHistory := &TxHistoryInfo{
Hash: l2result.Layer2Hash,
Amount: l2result.Amount,
To: l2result.Target,
IsL1: false,
BlockNumber: l2result.Height,
CreatedAt: l2result.CreatedAt,
FinalizeTx: &Finalized{
Hash: "",
},
}
updateCrossTxHash(l2result.MsgHash, txHistory, h.db)
txHistories = append(txHistories, txHistory)
continue
}
}
return txHistories, nil
}

View File

@@ -0,0 +1,69 @@
package utils
import (
"github.com/urfave/cli/v2"
)
var (
// CommonFlags holds the common app flags shared across modules
CommonFlags = []cli.Flag{
&ConfigFileFlag,
&VerbosityFlag,
&LogFileFlag,
&LogJSONFormat,
&LogDebugFlag,
&MetricsEnabled,
&MetricsAddr,
&MetricsPort,
}
// ConfigFileFlag loads the JSON config file.
ConfigFileFlag = cli.StringFlag{
Name: "config",
Usage: "JSON configuration file",
Value: "./config.json",
}
// VerbosityFlag sets the log level.
VerbosityFlag = cli.IntFlag{
Name: "verbosity",
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
Value: 3,
}
// LogFileFlag decides where the logger output is sent. If this flag is left
// empty, it will log to stderr.
LogFileFlag = cli.StringFlag{
Name: "log.file",
Usage: "Tells the module where to write log entries",
}
// LogJSONFormat decides whether the log format is JSON or not
LogJSONFormat = cli.BoolFlag{
Name: "log.json",
Usage: "Tells the module whether log format is json or not",
Value: true,
}
// LogDebugFlag prepends log messages with the call-site location
LogDebugFlag = cli.BoolFlag{
Name: "log.debug",
Usage: "Prepends log messages with call-site location (file and line number)",
}
// MetricsEnabled enables metrics collection and reporting
MetricsEnabled = cli.BoolFlag{
Name: "metrics",
Usage: "Enable metrics collection and reporting",
Category: "METRICS",
Value: false,
}
// MetricsAddr is the listening address of the metrics reporting server
MetricsAddr = cli.StringFlag{
Name: "metrics.addr",
Usage: "Metrics reporting server listening address",
Category: "METRICS",
Value: "127.0.0.1",
}
// MetricsPort is the listening port of the metrics reporting server
MetricsPort = cli.IntFlag{
Name: "metrics.port",
Usage: "Metrics reporting server listening port",
Category: "METRICS",
Value: 6060,
}
)

View File

@@ -0,0 +1,43 @@
package utils
import (
"io"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
)
// LogSetup sets up the logger
func LogSetup(ctx *cli.Context) error {
var ostream log.Handler
if logFile := ctx.String(LogFileFlag.Name); len(logFile) > 0 {
fp, err := os.OpenFile(filepath.Clean(logFile), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
utils.Fatalf("Failed to open log file", "err", err)
}
if ctx.Bool(LogJSONFormat.Name) {
ostream = log.StreamHandler(io.Writer(fp), log.JSONFormat())
} else {
ostream = log.StreamHandler(io.Writer(fp), log.TerminalFormat(true))
}
} else {
output := io.Writer(os.Stderr)
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
ostream = log.StreamHandler(output, log.TerminalFormat(usecolor))
}
// show the call file and line number
log.PrintOrigins(ctx.Bool(LogDebugFlag.Name))
glogger := log.NewGlogHandler(ostream)
// Set log level
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
return nil
}
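
A minimal sketch of wiring these helpers into a urfave/cli app. The import path for this utils package is assumed, not taken from the diff; everything else uses the flags and `LogSetup` shown above.

```go
package main

import (
	"os"

	"github.com/urfave/cli/v2"

	"bridge-history-api/utils" // assumed import path for the package above
)

func main() {
	app := cli.NewApp()
	app.Flags = append(app.Flags, utils.CommonFlags...)
	// LogSetup matches cli.BeforeFunc, so it runs before the action and
	// honors --log.file, --log.json, --verbosity and --log.debug.
	app.Before = utils.LogSetup
	app.Action = func(ctx *cli.Context) error { return nil }
	if err := app.Run(os.Args); err != nil {
		panic(err)
	}
}
```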

View File

@@ -0,0 +1,56 @@
package utils
import (
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
backendabi "bridge-history-api/abi"
)
// GetSafeBlockNumber returns the latest block number minus the given number of
// confirmations, or 0 if the chain is not yet that long.
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
number, err := client.BlockNumber(ctx)
if err != nil || number <= confirmations {
return 0, err
}
number = number - confirmations
return number, nil
}
// UnpackLog unpacks a retrieved log into the provided output structure.
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// ComputeMessageHash computes the message hash
func ComputeMessageHash(
sender common.Address,
target common.Address,
value *big.Int,
messageNonce *big.Int,
message []byte,
) common.Hash {
data, _ := backendabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
return common.BytesToHash(crypto.Keccak256(data))
}
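
A short sketch of how these helpers might be called. The RPC endpoint and the import path are placeholders (assumptions), not part of the diff.

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"

	"bridge-history-api/utils" // assumed import path for the package above
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}

	// Latest block number minus 6 confirmations (0 if the chain is shorter).
	safe, err := utils.GetSafeBlockNumber(context.Background(), client, 6)
	if err != nil {
		panic(err)
	}
	fmt.Println("safe block:", safe)

	// Hash of the relayMessage calldata for an empty message.
	msgHash := utils.ComputeMessageHash(common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), nil)
	fmt.Println("message hash:", msgHash.Hex())
}
```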

View File

@@ -1,15 +1,17 @@
.PHONY: mock_abi rollup_bins event_watcher gas_oracle rollup_relayer test lint clean docker
.PHONY: lint docker clean bridge
IMAGE_NAME=bridge
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
mock_abi:
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./rollup/mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out ./rollup/mock_bridge/MockBridgeL1.go
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol ./rollup/mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out ./rollup/mock_bridge/MockBridgeL2.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
rollup_bins: ## Builds the Rollup bins.
bridge_bins: ## Builds the Bridge bins.
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
event_watcher: ## Builds the event_watcher bin
@@ -18,6 +20,9 @@ event_watcher: ## Builds the event_watcher bin
gas_oracle: ## Builds the gas_oracle bin
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
message_relayer: ## Builds the message_relayer bin
go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
rollup_relayer: ## Builds the rollup_relayer bin
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
@@ -34,8 +39,10 @@ docker_push:
docker push scrolltech/gas-oracle:${IMAGE_VERSION}
docker push scrolltech/event-watcher:${IMAGE_VERSION}
docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
docker push scrolltech/msg-relayer:${IMAGE_VERSION}
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/event-watcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/event_watcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/rollup-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/rollup_relayer.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/msg-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/msg_relayer.Dockerfile

36
bridge/README.md Normal file
View File

@@ -0,0 +1,36 @@
# Bridge
This repo contains the Scroll bridge.
In addition, launching the bridge launches a separate instance of l2geth and sets up a communication channel
between the two over JSON-RPC sockets.
Note that the private keys used by the sender instances must not be duplicated.
## Dependency
+ install `abigen`
``` bash
go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
```
## Build
```bash
make clean
make bridge
```
## Start
* use default ports and config.json
```bash
./build/bin/bridge --http
```
* use specified ports and config.json
```bash
./build/bin/bridge --config ./config.json --http --http.addr localhost --http.port 8290
```

242
bridge/abi/bridge_abi.go Normal file
View File

@@ -0,0 +1,242 @@
package bridgeabi
import (
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
)
var (
// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
ScrollChainABI *abi.ABI
// L1ScrollMessengerABI holds information about L1ScrollMessenger's context and available invokable methods.
L1ScrollMessengerABI *abi.ABI
// L1MessageQueueABI holds information about L1MessageQueue contract's context and available invokable methods.
L1MessageQueueABI *abi.ABI
// L2GasPriceOracleABI holds information about L2GasPriceOracle's context and available invokable methods.
L2GasPriceOracleABI *abi.ABI
// L2ScrollMessengerABI holds information about L2ScrollMessenger's context and available invokable methods.
L2ScrollMessengerABI *abi.ABI
// L1BlockContainerABI holds information about L1BlockContainer contract's context and available invokable methods.
L1BlockContainerABI *abi.ABI
// L1GasPriceOracleABI holds information about L1GasPriceOracle's context and available invokable methods.
L1GasPriceOracleABI *abi.ABI
// L2MessageQueueABI holds information about L2MessageQueue contract's context and available invokable methods.
L2MessageQueueABI *abi.ABI
// L1SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes)")
L1SentMessageEventSignature common.Hash
// L1RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
L1RelayedMessageEventSignature common.Hash
// L1FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
L1FailedRelayedMessageEventSignature common.Hash
// L1CommitBatchEventSignature = keccak256("CommitBatch(bytes32)")
L1CommitBatchEventSignature common.Hash
// L1FinalizeBatchEventSignature = keccak256("FinalizeBatch(bytes32)")
L1FinalizeBatchEventSignature common.Hash
// L1QueueTransactionEventSignature = keccak256("QueueTransaction(address,address,uint256,uint256,uint256,bytes)")
L1QueueTransactionEventSignature common.Hash
// L2SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
L2SentMessageEventSignature common.Hash
// L2RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
L2RelayedMessageEventSignature common.Hash
// L2FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
L2FailedRelayedMessageEventSignature common.Hash
// L2ImportBlockEventSignature = keccak256("ImportBlock(bytes32,uint256,uint256,uint256,bytes32)")
L2ImportBlockEventSignature common.Hash
// L2AppendMessageEventSignature = keccak256("AppendMessage(uint256,bytes32)")
L2AppendMessageEventSignature common.Hash
)
func init() {
ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
L1ScrollMessengerABI, _ = L1ScrollMessengerMetaData.GetAbi()
L1MessageQueueABI, _ = L1MessageQueueMetaData.GetAbi()
L2GasPriceOracleABI, _ = L2GasPriceOracleMetaData.GetAbi()
L2ScrollMessengerABI, _ = L2ScrollMessengerMetaData.GetAbi()
L1BlockContainerABI, _ = L1BlockContainerMetaData.GetAbi()
L2MessageQueueABI, _ = L2MessageQueueMetaData.GetAbi()
L1GasPriceOracleABI, _ = L1GasPriceOracleMetaData.GetAbi()
L1SentMessageEventSignature = L1ScrollMessengerABI.Events["SentMessage"].ID
L1RelayedMessageEventSignature = L1ScrollMessengerABI.Events["RelayedMessage"].ID
L1FailedRelayedMessageEventSignature = L1ScrollMessengerABI.Events["FailedRelayedMessage"].ID
L1CommitBatchEventSignature = ScrollChainABI.Events["CommitBatch"].ID
L1FinalizeBatchEventSignature = ScrollChainABI.Events["FinalizeBatch"].ID
L1QueueTransactionEventSignature = L1MessageQueueABI.Events["QueueTransaction"].ID
L2SentMessageEventSignature = L2ScrollMessengerABI.Events["SentMessage"].ID
L2RelayedMessageEventSignature = L2ScrollMessengerABI.Events["RelayedMessage"].ID
L2FailedRelayedMessageEventSignature = L2ScrollMessengerABI.Events["FailedRelayedMessage"].ID
L2ImportBlockEventSignature = L1BlockContainerABI.Events["ImportBlock"].ID
L2AppendMessageEventSignature = L2MessageQueueABI.Events["AppendMessage"].ID
}
// Generated manually with abigen; only the necessary events and mutable calls are kept.
// ScrollChainMetaData contains all meta data concerning the ScrollChain contract.
var ScrollChainMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch\",\"name\":\"batch\",\"type\":\"tuple\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch[]\",\"name\":\"batches\",\"type\":\"tuple[]\"}],\"name\":\"commitBatches\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256[]\",\"name\":\"proof\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"instances\",\"type\":
\"uint256[]\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"getL2MessageRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
}
// L1ScrollMessengerMetaData contains all meta data concerning the L1ScrollMessenger contract.
var L1ScrollMessengerMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"merkleProof\",\"type\":\"bytes\"}],\"internalType\":\"structIL1ScrollMessenger.L2MessageProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"relayMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"oldGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"newGasLimit\",\"type\":\"uint32\"}],\"name\":\"replayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L1MessageQueueMetaData contains all meta data concerning the L1MessageQueue contract.
var L1MessageQueueMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"QueueTransaction\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"appendCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"appendEnforcedTransaction\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"}],\"name\":\"getCrossDomainMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextCrossDomainMessageIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L2GasPriceOracleMetaData contains all meta data concerning the L2GasPriceOracle contract.
var L2GasPriceOracleMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"L2BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL2BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L2ScrollMessengerMetaData contains all meta data concerning the L2ScrollMessenger contract.
var L2ScrollMessengerMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"relayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"stateRootProof\",\"type\":\"bytes\"}],\"internalType\":\"structIL2ScrollMessenger.L1MessageProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"retryMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L1BlockContainerMetaData contains all meta data concerning the L1BlockContainer contract.
var L1BlockContainerMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockHeight\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockTimestamp\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"}],\"name\":\"ImportBlock\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"name\":\"getBlockTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"name\":\"getStateRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blockHeaderRLP\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"updateGasPriceOracle\",\"type\":\"bool\"}],\"name\":\"importBlockHeader\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBlockHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBlockNumber\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestBlockTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L2MessageQueueMetaData contains all meta data concerning the L2MessageQueue contract.
var L2MessageQueueMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"AppendMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_messageHash\",\"type\":\"bytes32\"}],\"name\":\"appendMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"branches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messageRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messenger\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextMessageIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_messenger\",\"type\":\"address\"}],\"name\":\"updateMessenger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
}
// L1GasPriceOracleMetaData contains all meta data concerning the L1GasPriceOracle contract.
var L1GasPriceOracleMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
}
// IL1ScrollMessengerL2MessageProof is an auto-generated low-level Go binding around a user-defined struct.
type IL1ScrollMessengerL2MessageProof struct {
BatchHash common.Hash
MerkleProof []byte
}
// IScrollChainBatch is an auto-generated low-level Go binding around a user-defined struct.
type IScrollChainBatch struct {
Blocks []IScrollChainBlockContext
PrevStateRoot common.Hash
NewStateRoot common.Hash
WithdrawTrieRoot common.Hash
BatchIndex uint64
ParentBatchHash common.Hash
L2Transactions []byte
}
// IScrollChainBlockContext is an auto-generated low-level Go binding around a user-defined struct.
type IScrollChainBlockContext struct {
BlockHash common.Hash
ParentHash common.Hash
BlockNumber uint64
Timestamp uint64
BaseFee *big.Int
GasLimit uint64
NumTransactions uint16
NumL1Messages uint16
}
// L1CommitBatchEvent represents a CommitBatch event raised by the ScrollChain contract.
type L1CommitBatchEvent struct {
BatchHash common.Hash
}
// L1FinalizeBatchEvent represents a FinalizeBatch event raised by the ScrollChain contract.
type L1FinalizeBatchEvent struct {
BatchHash common.Hash
}
// L1RevertBatchEvent represents a RevertBatch event raised by the ScrollChain contract.
type L1RevertBatchEvent struct {
BatchHash common.Hash
}
// L1QueueTransactionEvent represents a QueueTransaction event raised by the L1MessageQueue contract.
type L1QueueTransactionEvent struct {
Sender common.Address
Target common.Address
Value *big.Int
QueueIndex *big.Int
GasLimit *big.Int
Data []byte
}
// L1SentMessageEvent represents a SentMessage event raised by the L1ScrollMessenger contract.
type L1SentMessageEvent struct {
Sender common.Address
Target common.Address
Value *big.Int
MessageNonce *big.Int
GasLimit *big.Int
Message []byte
}
// L1FailedRelayedMessageEvent represents a FailedRelayedMessage event raised by the L1ScrollMessenger contract.
type L1FailedRelayedMessageEvent struct {
MessageHash common.Hash
}
// L1RelayedMessageEvent represents a RelayedMessage event raised by the L1ScrollMessenger contract.
type L1RelayedMessageEvent struct {
MessageHash common.Hash
}
// L2AppendMessageEvent represents an AppendMessage event raised by the L2MessageQueue contract.
type L2AppendMessageEvent struct {
Index *big.Int
MessageHash common.Hash
}
// L2ImportBlockEvent represents an ImportBlock event raised by the L1BlockContainer contract.
type L2ImportBlockEvent struct {
BlockHash common.Hash
BlockHeight *big.Int
BlockTimestamp *big.Int
BaseFee *big.Int
StateRoot common.Hash
}
// L2SentMessageEvent represents a SentMessage event raised by the L2ScrollMessenger contract.
type L2SentMessageEvent struct {
Sender common.Address
Target common.Address
Value *big.Int
MessageNonce *big.Int
GasLimit *big.Int
Message []byte
}
// L2FailedRelayedMessageEvent represents a FailedRelayedMessage event raised by the L2ScrollMessenger contract.
type L2FailedRelayedMessageEvent struct {
MessageHash common.Hash
}
// L2RelayedMessageEvent represents a RelayedMessage event raised by the L2ScrollMessenger contract.
type L2RelayedMessageEvent struct {
MessageHash common.Hash
}
// GetBatchCalldataLength returns the calldata byte length of an IScrollChainBatch.
func GetBatchCalldataLength(batch *IScrollChainBatch) uint64 {
return uint64(5*32 + len(batch.L2Transactions) + len(batch.Blocks)*8*32)
}
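
For intuition, the formula above works out to 5*32 + 0 + 1*8*32 = 416 bytes for a single-block batch with no L2 transactions: 5 fixed 32-byte fields plus one block context of 8 32-byte words. A small sketch (the import path is the same one used by the test file below):

```go
package main

import (
	"fmt"

	bridge_abi "scroll-tech/bridge/abi"
)

func main() {
	// One block context (8 × 32-byte words) and no L2 transactions:
	// 5*32 + 0 + 1*8*32 = 416 bytes of calldata.
	batch := &bridge_abi.IScrollChainBatch{
		Blocks:         make([]bridge_abi.IScrollChainBlockContext, 1),
		L2Transactions: []byte{},
	}
	fmt.Println(bridge_abi.GetBatchCalldataLength(batch)) // 416
}
```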

View File

@@ -0,0 +1,133 @@
package bridgeabi_test
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
bridge_abi "scroll-tech/bridge/abi"
)
func TestEventSignature(t *testing.T) {
assert := assert.New(t)
assert.Equal(bridge_abi.L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(bridge_abi.L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(bridge_abi.L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(bridge_abi.L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
assert.Equal(bridge_abi.L1FinalizeBatchEventSignature, common.HexToHash("6be443154c959a7a1645b4392b6fa97d8e8ab6e8fd853d7085e8867083737d79"))
assert.Equal(bridge_abi.L1QueueTransactionEventSignature, common.HexToHash("bdcc7517f8fe3db6506dfd910942d0bbecaf3d6a506dadea65b0d988e75b9439"))
assert.Equal(bridge_abi.L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
assert.Equal(bridge_abi.L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(bridge_abi.L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(bridge_abi.L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))
assert.Equal(bridge_abi.L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
}
func TestPackRelayL2MessageWithProof(t *testing.T) {
assert := assert.New(t)
l1MessengerABI, err := bridge_abi.L1ScrollMessengerMetaData.GetAbi()
assert.NoError(err)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BatchHash: common.Hash{},
MerkleProof: make([]byte, 0),
}
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
assert.NoError(err)
}
func TestPackCommitBatch(t *testing.T) {
assert := assert.New(t)
scrollChainABI, err := bridge_abi.ScrollChainMetaData.GetAbi()
assert.NoError(err)
header := bridge_abi.IScrollChainBlockContext{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BlockNumber: 0,
Timestamp: 0,
BaseFee: big.NewInt(0),
GasLimit: 0,
NumTransactions: 0,
NumL1Messages: 0,
}
batch := bridge_abi.IScrollChainBatch{
Blocks: []bridge_abi.IScrollChainBlockContext{header},
PrevStateRoot: common.Hash{},
NewStateRoot: common.Hash{},
WithdrawTrieRoot: common.Hash{},
BatchIndex: 0,
L2Transactions: make([]byte, 0),
}
_, err = scrollChainABI.Pack("commitBatch", batch)
assert.NoError(err)
}
func TestPackFinalizeBatchWithProof(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.ScrollChainMetaData.GetAbi()
assert.NoError(err)
proof := make([]*big.Int, 10)
instance := make([]*big.Int, 10)
for i := 0; i < 10; i++ {
proof[i] = big.NewInt(0)
instance[i] = big.NewInt(0)
}
_, err = l1RollupABI.Pack("finalizeBatchWithProof", common.Hash{}, proof, instance)
assert.NoError(err)
}
func TestPackRelayL1Message(t *testing.T) {
assert := assert.New(t)
l2MessengerABI, err := bridge_abi.L2ScrollMessengerMetaData.GetAbi()
assert.NoError(err)
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), make([]byte, 0))
assert.NoError(err)
}
func TestPackSetL1BaseFee(t *testing.T) {
assert := assert.New(t)
l1GasOracleABI, err := bridge_abi.L1GasPriceOracleMetaData.GetAbi()
assert.NoError(err)
baseFee := big.NewInt(2333)
_, err = l1GasOracleABI.Pack("setL1BaseFee", baseFee)
assert.NoError(err)
}
func TestPackSetL2BaseFee(t *testing.T) {
assert := assert.New(t)
l2GasOracleABI, err := bridge_abi.L2GasPriceOracleMetaData.GetAbi()
assert.NoError(err)
baseFee := big.NewInt(2333)
_, err = l2GasOracleABI.Pack("setL2BaseFee", baseFee)
assert.NoError(err)
}
func TestPackImportBlock(t *testing.T) {
assert := assert.New(t)
l1BlockContainerABI := bridge_abi.L1BlockContainerABI
_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, make([]byte, 0), false)
assert.NoError(err)
}
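The fixed hashes asserted in TestEventSignature are keccak256 digests of the canonical event declarations exposed by the contract ABIs. A minimal, self-contained sketch of how such a constant is derived; the event declaration string below is a placeholder for illustration, not the actual Scroll contract event:
// Sketch only: derives an event-signature hash from a placeholder declaration.
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	sig := crypto.Keccak256Hash([]byte("SentMessage(address,address,uint256,uint256,uint256,bytes)"))
	fmt.Println(sig.Hex())
}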

View File

@@ -7,41 +7,47 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/watcher"
"scroll-tech/bridge/config"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var app *cli.App
var (
app *cli.App
)
func init() {
// Set up event-watcher app info.
app = cli.NewApp()
app.Action = action
app.Name = "event-watcher"
app.Usage = "The Scroll Event Watcher"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `event-watcher-test` app for integration-test.
utils.RegisterSimulation(app, utils.EventWatcherApp)
cutils.RegisterSimulation(app, cutils.EventWatcherApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
@@ -49,19 +55,21 @@ func action(ctx *cli.Context) error {
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
@@ -73,23 +81,18 @@ func action(ctx *cli.Context) error {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
go utils.Loop(subCtx, 10*time.Second, func() {
go cutils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
log.Error("Failed to fetch bridge contract", "err", loopErr)
}
})
// Start l2 watcher process
go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
go cutils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
// Finished starting all L2 functions
log.Info("Start event-watcher successfully")
// Catch CTRL-C to ensure a graceful shutdown.

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/event_watcher/app"
func main() {
app.Run()
}

View File

@@ -7,64 +7,70 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
butils "scroll-tech/rollup/internal/utils"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var app *cli.App
var (
app *cli.App
)
func init() {
// Set up gas-oracle app info.
app = cli.NewApp()
app.Action = action
app.Name = "gas-oracle"
app.Usage = "The Scroll Gas Oracle"
app.Description = "Scroll Gas Oracle."
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `gas-oracle-test` app for integration-test.
utils.RegisterSimulation(app, utils.GasOracleApp)
cutils.RegisterSimulation(app, cutils.GasOracleApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
@@ -79,36 +85,34 @@ func action(ctx *cli.Context) error {
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, ormFactory)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
// Fetch the latest block number to decrease the delay when fetching gas prices
// Use latest block number - 1 to prevent frequent reorg
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, rpc.LatestBlockNumber)
go cutils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l1client, cfg.L1Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
if loopErr = l1watcher.FetchBlockHeader(number - 1); loopErr != nil {
log.Error("Failed to fetch L1 block header", "latest", number-1, "err", loopErr)
if loopErr = l1watcher.FetchBlockHeader(number); loopErr != nil {
log.Error("Failed to fetch L1 block header", "latest", number, "err", loopErr)
}
})
// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finished starting all message relayer functions
log.Info("Start gas-oracle successfully")
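The utils.Loop and LoopWithContext helpers used by these apps run a callback on a fixed ticker until the surrounding context is cancelled. A minimal sketch of the assumed shape (the real helpers live in scroll-tech/common/utils and may differ):
// Sketch only: approximate shape of the ticker-loop helper; assumes the
// standard context and time packages are imported.
func Loop(ctx context.Context, period time.Duration, f func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f()
		}
	}
}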

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/gas_oracle/app"
func main() {
app.Run()
}

View File

@@ -11,7 +11,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/rollup/internal/config"
"scroll-tech/bridge/config"
)
// MockApp is the mock-app test client manager.
@@ -22,29 +22,29 @@ type MockApp struct {
mockApps map[utils.MockAppName]docker.AppAPI
originFile string
rollupFile string
bridgeFile string
args []string
}
// NewRollupApp returns a new rollupApp manager; name must be one of them.
func NewRollupApp(base *docker.App, file string) *MockApp {
// NewBridgeApp returns a new bridgeApp manager; name must be one of them.
func NewBridgeApp(base *docker.App, file string) *MockApp {
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", base.Timestamp)
rollupApp := &MockApp{
bridgeFile := fmt.Sprintf("/tmp/%d_bridge-config.json", base.Timestamp)
bridgeApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
originFile: file,
rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile},
bridgeFile: bridgeFile,
args: []string{"--log.debug", "--config", bridgeFile},
}
if err := rollupApp.MockConfig(true); err != nil {
if err := bridgeApp.MockConfig(true); err != nil {
panic(err)
}
return rollupApp
return bridgeApp
}
// RunApp runs the rollup-test child process with multiple parameters.
// RunApp runs the bridge-test child process with multiple parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
if !(name == utils.EventWatcherApp ||
name == utils.GasOracleApp ||
@@ -72,35 +72,35 @@ func (b *MockApp) WaitExit() {
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
}
// Free stops and releases the rollup mocked apps.
// Free stops and releases the bridge mocked apps.
func (b *MockApp) Free() {
b.WaitExit()
_ = os.Remove(b.rollupFile)
_ = os.Remove(b.bridgeFile)
}
// MockConfig creates a new rollup config.
// MockConfig creates a new bridge config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin rollup config file.
// Load origin bridge config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
}
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig.DSN = base.DBImg.Endpoint()
b.Config = cfg
if !store {
return nil
}
// Store changed rollup config into a temp file.
// Store changed bridge config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.rollupFile, data, 0600)
return os.WriteFile(b.bridgeFile, data, 0600)
}

View File

@@ -0,0 +1,118 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
cutils "scroll-tech/common/utils"
)
var (
app *cli.App
)
func init() {
// Set up message-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "message-relayer"
app.Usage = "The Scroll Message Relayer"
app.Description = "Message Relayer contains two main services: 1) relay L1 messages to L2. 2) relay L2 messages to L1."
app.Version = version.Version
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return cutils.LogSetup(ctx)
}
// Register `message-relayer-test` app for integration-test.
cutils.RegisterSimulation(app, cutils.MessageRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, ormFactory, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1relayer process
go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)
// Start l2relayer process
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)
// Finished starting all message relayer functions
log.Info("Start message-relayer successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run message_relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/msg_relayer/app"
func main() {
app.Run()
}

View File

@@ -7,64 +7,70 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/common/metrics"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
"scroll-tech/rollup/internal/controller/watcher"
butils "scroll-tech/rollup/internal/utils"
"scroll-tech/bridge/config"
"scroll-tech/bridge/relayer"
"scroll-tech/bridge/utils"
"scroll-tech/bridge/watcher"
cutils "scroll-tech/common/utils"
)
var app *cli.App
var (
app *cli.App
)
func init() {
// Set up rollup-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "rollup-relayer"
app.Usage = "The Scroll Rollup Relayer"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
app.Flags = append(app.Flags, cutils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
return cutils.LogSetup(ctx)
}
// Register `rollup-relayer-test` app for integration-test.
utils.RegisterSimulation(app, utils.RollupRelayerApp)
cutils.RegisterSimulation(app, cutils.RollupRelayerApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
@@ -73,45 +79,37 @@ func action(ctx *cli.Context) error {
return err
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, ormFactory, cfg.L2Config.RelayerConfig)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
if err != nil {
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, l2relayer, ormFactory)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, ormFactory)
// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
go cutils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := utils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(number)
l2watcher.TryFetchRunningMissingBlocks(ctx, number)
})
go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
// Batch proposer loop
go cutils.Loop(subCtx, 2*time.Second, func() {
batchProposer.TryProposeBatch()
batchProposer.TryCommitBatches()
})
go utils.Loop(subCtx, 10*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finished starting all rollup relayer functions.
log.Info("Start rollup-relayer successfully")

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/bridge/cmd/rollup_relayer/app"
func main() {
app.Run()
}

View File

@@ -1,14 +1,16 @@
{
"l1_config": {
"confirmations": "0x6",
"endpoint": "https://rpc.ankr.com/eth",
"endpoint": "DUMMY_ENDPOINT",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"l1_message_queue_address": "0x0000000000000000000000000000000000000000",
"scroll_chain_address": "0x0000000000000000000000000000000000000000",
"start_height": 0,
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://rpc.scroll.io",
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"check_pending_time": 2,
"check_balance_time": 100,
"escalate_blocks": 100,
@@ -24,19 +26,26 @@
"min_gas_price": 0,
"gas_price_diff": 50000
},
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
"finalize_batch_interval_sec": 0,
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1313131313131313131313131313131313131313131313131313131313131313"
]
}
},
"l2_config": {
"confirmations": "0x1",
"endpoint": "https://rpc.scroll.io",
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"l2_messenger_address": "0x0000000000000000000000000000000000000000",
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://rpc.ankr.com/eth",
"endpoint": "DUMMY_ENDPOINT",
"check_pending_time": 10,
"check_balance_time": 100,
"escalate_blocks": 100,
@@ -44,7 +53,7 @@
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "DynamicFeeTx",
"tx_type": "LegacyTx",
"min_balance": 100000000000000000000,
"pending_limit": 10
},
@@ -52,39 +61,34 @@
"min_gas_price": 0,
"gas_price_diff": 50000
},
"chain_monitor": {
"enabled": false,
"timeout": 3,
"try_times": 5,
"base_url": "http://localhost:8750"
},
"enable_test_env_bypass_features": true,
"finalize_batch_without_proof_timeout_sec": 7200,
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515",
"l1_commit_gas_limit_multiplier": 1.2
},
"chunk_proposer_config": {
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
"finalize_batch_interval_sec": 0,
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1313131313131313131313131313131313131313131313131313131313131313"
],
"rollup_sender_private_keys": [
"1414141414141414141414141414141414141414141414141414141414141414"
]
},
"batch_proposer_config": {
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
"proof_generation_freq": 1,
"batch_gas_threshold": 3000000,
"batch_tx_num_threshold": 44,
"batch_time_sec": 300,
"batch_commit_time_sec": 1200,
"batch_blocks_limit": 100,
"commit_tx_calldata_size_limit": 200000,
"public_input_config": {
"max_tx_num": 44,
"padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://localhost/scroll?sslmode=disable",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
}
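The maxOpenNum and maxIdleNum values presumably map onto the standard database/sql connection-pool limits. A minimal sketch of applying them, assuming the DBConfig struct exposes DriverName, DSN, MaxOpenNum, and MaxIdleNum as exported fields (these names are assumptions):
// Sketch only: field names on cfg.DBConfig are assumptions.
db, err := sql.Open(cfg.DBConfig.DriverName, cfg.DBConfig.DSN)
if err != nil {
	log.Crit("failed to open db connection", "err", err)
}
db.SetMaxOpenConns(cfg.DBConfig.MaxOpenNum)
db.SetMaxIdleConns(cfg.DBConfig.MaxIdleNum)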

bridge/config/config.go Normal file
View File

@@ -0,0 +1,32 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
"scroll-tech/database"
)
// Config load configuration items.
type Config struct {
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *database.DBConfig `json:"db_config"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &Config{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
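NewConfig is the loader every app above calls on startup. A minimal usage sketch; the file path is illustrative:
// Sketch only: the config path is illustrative.
cfg, err := config.NewConfig("/tmp/bridge-config.json")
if err != nil {
	log.Crit("failed to load config file", "error", err)
}
log.Info("loaded bridge config", "l1 endpoint", cfg.L1Config.Endpoint)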

Some files were not shown because too many files have changed in this diff.