Compare commits


4 Commits

Author             SHA1        Message                                                 Date
Péter Garamvölgyi  d783d01f4b  fix import_genesis_block.ts                             2022-11-09 13:32:46 +01:00
Péter Garamvölgyi  3c982a9199  Merge branch 'staging' into foundry-deployment-script   2022-11-02 10:01:40 +01:00
Péter Garamvölgyi  13bcda11b7  Merge branch 'staging' into foundry-deployment-script   2022-10-27 22:03:10 +02:00
Thegaram           632ca10157  use Foundry for deployment scripts                      2022-10-26 21:22:05 -05:00
618 changed files with 154779 additions and 88152 deletions


@@ -1,34 +0,0 @@
### Purpose or design rationale of this PR
*Describe your change. Make sure to answer these three questions: What does this PR do? Why does it do it? How does it do it?*
### PR title
Your PR title must follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) (as we are doing squash merge for each PR), so it must start with one of the following [types](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#type):
- [ ] build: Changes that affect the build system or external dependencies (example scopes: yarn, eslint, typescript)
- [ ] ci: Changes to our CI configuration files and scripts (example scopes: vercel, github, cypress)
- [ ] docs: Documentation-only changes
- [ ] feat: A new feature
- [ ] fix: A bug fix
- [ ] perf: A code change that improves performance
- [ ] refactor: A code change that neither fixes a bug, adds a feature, nor improves performance
- [ ] style: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc)
- [ ] test: Adding missing tests or correcting existing tests
### Deployment tag versioning
Has `tag` in `common/version.go` been updated, or have you added the `bump-version` label to this PR?
- [ ] No, this PR doesn't involve a new deployment, git tag, or docker image tag
- [ ] Yes
### Breaking change label
Does this PR have the `breaking-change` label?
- [ ] No, this PR is not a breaking change
- [ ] Yes
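For illustration, a PR title that satisfies the conventional-commits rule combines one of the types listed above with a short description (these titles are hypothetical examples, not taken from this PR):

```
feat: use Foundry for deployment scripts
fix(bridge): handle empty batch in relayer
```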


@@ -1,37 +0,0 @@
import { URL } from "url";
import { readFileSync, writeFileSync } from "fs";
const versionFilePath = new URL(
"../../common/version/version.go",
import.meta.url
).pathname;
const versionFileContent = readFileSync(versionFilePath, { encoding: "utf-8" });
const currentVersion = versionFileContent.match(
/var tag = "(?<version>v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+))"/
);
try {
parseInt(currentVersion.groups.major);
parseInt(currentVersion.groups.minor);
parseInt(currentVersion.groups.patch);
} catch (err) {
console.error(new Error("Failed to parse version in version.go file"));
throw err;
}
// prettier-ignore
const newVersion = `v${currentVersion.groups.major}.${currentVersion.groups.minor}.${parseInt(currentVersion.groups.patch) + 1}`;
console.log(
`Bump version from ${currentVersion.groups.version} to ${newVersion}`
);
writeFileSync(
versionFilePath,
versionFileContent.replace(
`var tag = "${currentVersion.groups.version}"`,
`var tag = "${newVersion}"`
)
);
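A rough usage sketch for the script above (run from the repository root; the version numbers shown are illustrative):

```bash
node .github/scripts/bump_version_dot_go.mjs
# prints, e.g.: Bump version from v1.2.3 to v1.2.4
# and rewrites the `var tag = "..."` line in common/version/version.go
```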

.github/workflows/bridge.yml (new file, 60 lines)

@@ -0,0 +1,60 @@
name: Bridge
on:
push:
branches:
- main
- staging
paths:
- 'bridge/**'
- '.github/workflows/bridge.yml'
pull_request:
branches:
- main
- staging
paths:
- 'bridge/**'
- '.github/workflows/bridge.yml'
defaults:
run:
working-directory: 'bridge'
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: pontem-network/get-solc@master
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
make lint
goimports-lint:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
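A minimal sketch of how this check can be reproduced locally before pushing (assuming the repository root as the starting directory; the `-local` prefix mirrors the workflow above):

```bash
cd bridge
go install golang.org/x/tools/cmd/goimports@latest
goimports -local scroll-tech/bridge/ -w .
go mod tidy
# any output from the next command means the CI job above would fail
git status --porcelain
```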


@@ -1,80 +0,0 @@
name: BridgeHistoryAPI
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'bridge-history-api/**'
- '.github/workflows/bridge_history_api.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'bridge-history-api/**'
- '.github/workflows/bridge_history_api.yml'
defaults:
run:
working-directory: 'bridge-history-api'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Test
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: bridge-history-api
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge-history-api/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi


@@ -1,63 +0,0 @@
name: Bump version
on:
pull_request:
branches: [ develop ]
types:
- opened
- reopened
- synchronize
- ready_for_review
- labeled
jobs:
try-to-bump:
if: contains(github.event.pull_request.labels.*.name, 'bump-version')
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ github.head_ref }}
- name: check diff
id: check_diff
run: |
set -euo pipefail
# fetch develop branch so that we can diff against later
git fetch origin develop
echo 'checking version changes in diff...'
# check if version changed in version.go
# note: the grep will fail if use \d instead of [0-9]
git diff HEAD..origin/develop --text --no-ext-diff --unified=0 --no-prefix common/version/version.go | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true
exit_code=$?
# auto bump if version is not bumped manually
echo '> require auto version bump?'
if [ $exit_code -eq 0 ]; then
echo '> no, already bumped'
echo "result=no-bump" >> "$GITHUB_OUTPUT"
else
echo '> yes'
echo "result=bump" >> "$GITHUB_OUTPUT"
fi
- name: Install Node.js 16
if: steps.check_diff.outputs.result == 'bump'
uses: actions/setup-node@v3
with:
node-version: 16
- name: bump version in common/version/version.go
if: steps.check_diff.outputs.result == 'bump'
run: node .github/scripts/bump_version_dot_go.mjs
# Commits made by this Action do not trigger new Workflow runs
- uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
if: steps.check_diff.outputs.result == 'bump'
with:
skip_fetch: true # already did fetch in check diff
file_pattern: "common/version/version.go"
commit_message: "chore: auto version bump[bot]"
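Condensed into an illustrative local sketch (branch name, file path, and script path are taken from the workflow above; the exact CI shell differs slightly):

```bash
git fetch origin develop
# did the PR already change the tag in common/version/version.go?
if git diff HEAD..origin/develop --unified=0 common/version/version.go \
    | grep -Eq '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$'; then
  echo "already bumped manually"                 # workflow output: no-bump
else
  node .github/scripts/bump_version_dot_go.mjs   # workflow output: bump, then auto-commit
fi
```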


@@ -5,101 +5,51 @@ on:
branches:
- main
- staging
- develop
- alpha
paths:
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/common.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
branches:
- main
- staging
paths:
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/common.yml'
defaults:
run:
working-directory: 'common'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v4
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
uses: actions/checkout@v2
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'common'
run: goimports -local scroll-tech/common/ -w .
- name: Run go mod tidy
working-directory: 'common'
run: go mod tidy
- run: goimports -local scroll-tech/common/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'common'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
name: 'solc'
version: '0.8.24'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test common packages
working-directory: 'common'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: common

.github/workflows/contracts.yaml (new file, 121 lines)

@@ -0,0 +1,121 @@
name: Contracts
on:
push:
branches:
- master
- main
- prod
- release/*
- staging
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
pull_request:
branches:
- master
- main
- prod
- release/*
- staging
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
defaults:
run:
working-directory: 'contracts'
jobs:
foundry:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
submodules: recursive
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly
- name: Install Node.js 14
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
- name: Cache yarn dependencies
uses: actions/cache@v2
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('contracts/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Cache node_modules
id: npm_cache
uses: actions/cache@v2
with:
path: node_modules
key: node_modules-${{ hashFiles('contracts/yarn.lock') }}
- name: yarn install
# if: steps.npm_cache.outputs.cache-hit != 'true'
run: yarn install
- name: Compile with foundry
run: forge build
- name: Run foundry tests
run: forge test -vvv
hardhat:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
submodules: recursive
- name: Install Node.js 14
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
- name: Cache yarn dependencies
uses: actions/cache@v2
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('contracts/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Cache node_modules
id: npm_cache
uses: actions/cache@v2
with:
path: node_modules
key: node_modules-${{ hashFiles('contracts/yarn.lock') }}
- name: yarn install
# if: steps.npm_cache.outputs.cache-hit != 'true'
run: yarn install
- name: Compile with hardhat
run: npx hardhat compile
- name: Run hardhat tests
run: npx hardhat test


@@ -5,118 +5,51 @@ on:
branches:
- main
- staging
- develop
- alpha
paths:
- 'coordinator/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/coordinator.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
branches:
- main
- staging
paths:
- 'coordinator/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/coordinator.yml'
defaults:
run:
working-directory: 'coordinator'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
working-directory: 'coordinator'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'coordinator'
run: goimports -local scroll-tech/coordinator/ -w .
- name: Run go mod tidy
working-directory: 'coordinator'
run: go mod tidy
- run: goimports -local scroll-tech/coordinator/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'coordinator'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v4
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Build and push
# uses: docker/build-push-action@v2
# with:
# context: .
# file: ./build/dockerfiles/coordinator.Dockerfile
# push: false
# # cache-from: type=gha,scope=${{ github.workflow }}
# # cache-to: type=gha,scope=${{ github.workflow }}
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
name: 'solc'
version: '0.8.24'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: coordinator


@@ -5,94 +5,51 @@ on:
branches:
- main
- staging
- develop
- alpha
paths:
- 'database/**'
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/database.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
branches:
- main
- staging
paths:
- 'database/**'
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/database.yml'
defaults:
run:
working-directory: 'database'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
working-directory: 'database'
run: |
rm -rf $HOME/.cache/golangci-lint
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
working-directory: 'database'
run: goimports -local scroll-tech/database/ -w .
- name: Run go mod tidy
working-directory: 'database'
run: go mod tidy
- run: goimports -local scroll-tech/database/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'database'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
name: 'solc'
version: '0.8.24'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
- name: Test database packages
working-directory: 'database'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: database


@@ -1,41 +0,0 @@
name: Docker-coordinator-api-arm64
on:
workflow_dispatch:
inputs:
tag:
description: "tag of this image (suffix -arm64 is added automatically)"
required: true
type: string
jobs:
build-and-push-arm64-image:
runs-on: ubuntu-latest
strategy:
matrix:
arch:
- aarch64
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
run: |
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
docker buildx create --name multiarch --driver docker-container --use
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build docker image
uses: docker/build-push-action@v2
with:
platforms: linux/arm64
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: scrolltech/coordinator-api:${{inputs.tag}}-arm64


@@ -1,369 +0,0 @@
name: Docker
on:
push:
tags:
- v**
env:
AWS_REGION: us-west-2
jobs:
gas_oracle:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: gas-oracle
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: gas-oracle
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: rollup-relayer
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: rollup-relayer
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: rollup-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: rollup-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/db_cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: bridgehistoryapi-fetcher
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: bridgehistoryapi-fetcher
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: bridgehistoryapi-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: bridgehistoryapi-api
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: bridgehistoryapi-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: bridgehistoryapi-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: coordinator-api
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-cron:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: coordinator-cron
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: coordinator-cron
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest


@@ -1,43 +0,0 @@
name: Integration
on:
push:
branches:
- main
- staging
- develop
- alpha
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
jobs:
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
name: 'solc'
version: '0.8.24'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
make -C rollup mock_abi
make -C common/bytecode all
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...


@@ -1,112 +0,0 @@
name: Intermediate Docker
on:
workflow_dispatch:
inputs:
GO_VERSION:
description: "Go version"
required: true
type: choice
options:
- "1.20"
- "1.21"
- "1.22"
- "1.23"
default: "1.21"
RUST_VERSION:
description: "Rust toolchain version"
required: true
type: choice
options:
- nightly-2023-12-03
- nightly-2022-12-10
default: "nightly-2023-12-03"
PYTHON_VERSION:
description: "Python version"
required: false
type: choice
options:
- "3.10"
default: "3.10"
CUDA_VERSION:
description: "Cuda version"
required: false
type: choice
options:
- "11.7.1"
- "12.2.2"
default: "11.7.1"
CARGO_CHEF_TAG:
description: "Cargo chef version"
required: true
default: "0.1.41"
type: choice
options:
- 0.1.41
BASE_IMAGE:
description: "which intermediate image you want to update"
required: true
default: "go-alpine-builder"
type: choice
options:
- cuda-go-rust-builder
- go-rust-builder
- go-alpine-builder
- rust-builder
- rust-alpine-builder
- go-rust-alpine-builder
- py-runner
defaults:
run:
working-directory: "build/dockerfiles/intermediate"
jobs:
build-and-publish-intermediate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: set tag env
run: |
if [ ${{github.event.inputs.BASE_IMAGE}} == "cuda-go-rust-builder" ]; then
echo "TAG=cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-rust-builder" ]; then
echo "TAG=go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-alpine-builder" ]; then
echo "TAG=${{ github.event.inputs.GO_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "rust-builder" ]; then
echo "TAG=${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "rust-alpine-builder" ]; then
echo "TAG=${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-rust-alpine-builder" ]; then
echo "TAG=go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "py-runner" ]; then
echo "TAG=${{ github.event.inputs.PYTHON_VERSION }}" >> $GITHUB_ENV
else
echo "no BASE_IMAGE match"
fi
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/${{ github.event.inputs.BASE_IMAGE }}.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/${{ github.event.inputs.BASE_IMAGE }}:${{ env.TAG }}
build-args: |
CUDA_VERSION=${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION=${{ github.event.inputs.GO_VERSION }}
RUST_VERSION=${{ github.event.inputs.RUST_VERSION }}
PYTHON_VERSION=${{ github.event.inputs.PYTHON_VERSION }}
CARGO_CHEF_TAG=${{ github.event.inputs.CARGO_CHEF_TAG }}


@@ -1,99 +0,0 @@
name: Prover
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
defaults:
run:
working-directory: 'prover'
jobs:
skip_check:
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
cancel_others: 'true'
concurrent_skipping: 'same_content_newer'
paths_ignore: '["**/README.md"]'
fmt:
needs: [skip_check]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: rustfmt
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Cargo check
run: cargo check --all-features
- name: Cargo fmt
run: cargo fmt --all -- --check
clippy:
needs: [skip_check, fmt]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: clippy
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Run clippy
run: cargo clippy --all-features --all-targets -- -D warnings
compile:
needs: [skip_check, clippy]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Test
run: |
make prover

.github/workflows/roller.yml (new file, 88 lines)

@@ -0,0 +1,88 @@
name: Roller
on:
push:
branches:
- main
- staging
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
pull_request:
branches:
- main
- staging
paths:
- 'roller/**'
- '.github/workflows/roller.yml'
defaults:
run:
working-directory: 'roller'
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-08-23
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo registry
uses: actions/cache@v2
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-roller-cargo-registry-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v2
with:
path: ~/.cargo/git
key: ${{ runner.os }}-roller-cargo-index-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
- name: Cache cargo target
uses: actions/cache@v2
with:
path: /home/runner/work/scroll/scroll/roller/core/prover/rust/target
key: ${{ runner.os }}-roller-cargo-build-target-${{ hashFiles('roller/core/prover/rust/Cargo.lock') }}
- name: Test
run: |
make roller
go test -v ./...
check:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/roller/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi


@@ -1,123 +0,0 @@
name: Rollup
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'rollup/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/rollup.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'rollup/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/rollup.yml'
jobs:
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
name: 'solc'
version: '0.8.24'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
working-directory: 'rollup'
run: |
rm -rf $HOME/.cache/golangci-lint
make mock_abi
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
run: goimports -local scroll-tech/rollup/ -w .
working-directory: 'rollup'
- name: Run go mod tidy
run: go mod tidy
working-directory: 'rollup'
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
working-directory: 'rollup'
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
name: 'solc'
version: '0.8.24'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
run: |
make dev_docker
make -C rollup mock_abi
- name: Build rollup binaries
working-directory: 'rollup'
run: |
make rollup_bins
- name: Test rollup packages
working-directory: 'rollup'
run: |
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: rollup
# docker-build:
# if: github.event.pull_request.draft == false
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v4
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker

.gitignore (25 changed lines)

@@ -1,26 +1,3 @@
# Asset files
.idea
assets/params*
assets/seed
# Built binaries
build/bin
verifier.test
core.test
coverage.txt
*.integration.txt
# Visual Studio Code
.vscode
# IntelliJ
.idea
# MacOS
.DS_Store
# misc
sftp-config.json
*~
target

.gitmodules (18 changed lines)

@@ -1,3 +1,15 @@
[submodule "scroll-contracts"]
path = scroll-contracts
url = https://github.com/scroll-tech/scroll-contracts.git
[submodule "l2geth"]
path = l2geth
url = git@github.com:scroll-tech/go-ethereum.git
[submodule "rpc-gateway"]
path = rpc-gateway
url = git@github.com:scroll-tech/rpc-gateway.git
[submodule "contracts/lib/ds-test"]
path = contracts/lib/ds-test
url = https://github.com/dapphub/ds-test
[submodule "contracts/lib/forge-std"]
path = contracts/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "contracts/lib/solmate"]
path = contracts/lib/solmate
url = https://github.com/rari-capital/solmate


@@ -1,5 +0,0 @@
repos:
- repo: git@github.com:scroll-tech/keydead.git
rev: main
hooks:
- id: keydead


@@ -1,128 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct that could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
contributor@scroll.io.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.


@@ -1,43 +0,0 @@
## Contributing
[fork]: /fork
[pr]: /compare
[style]: https://standardjs.com/
[code-of-conduct]: CODE_OF_CONDUCT.md
Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for keeping it great.
Please note that this project is released with a [Contributor Code of Conduct][code-of-conduct]. By participating in this project you agree to abide by its terms.
## Contribute to Scroll
Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing Dev Tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits, please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.
## Issues and PRs
If you have suggestions for how this project could be improved, or want to report a bug, open an issue! We'd love all and any contributions. If you have questions, too, we'd love to hear them.
We'd also love PRs. If you're thinking of a large PR, we advise opening up an issue first to talk about it, though! Look at the links below if you're not sure how to open a PR.
## Submitting a pull request
1. [Fork][fork] and clone the repository.
2. Create a new branch: `git checkout -b my-branch-name`.
3. Make your change, add tests, and make sure the tests still pass.
4. Format your code from the scroll home directory: `make lint && make fmt`
5. Push to your fork and [submit a pull request][pr].
6. Pat yourself on the back and wait for your pull request to be reviewed and merged.
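A condensed shell version of the numbered steps above (the fork URL is a placeholder; substitute your own):

```bash
git clone https://github.com/<your-username>/scroll.git && cd scroll
git checkout -b my-branch-name
# ... make your change and add tests ...
make lint && make fmt            # run from the scroll home directory
git push origin my-branch-name   # then open a pull request against the upstream repo
```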
Here are a few things you can do that will increase the likelihood of your pull request being accepted:
- Write and update tests.
- Keep your changes as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
Work in Progress pull requests are also welcome to get feedback early on, or if there is something that blocked you.
## Resources
- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
- [GitHub Help](https://help.github.com)

Jenkinsfile (new file, 116 lines)

@@ -0,0 +1,116 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
def boolean test_result = false
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
}
environment {
GO111MODULE = 'on'
}
stages {
stage('Build') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
steps {
//start to build project
sh '''#!/bin/bash
export PATH=/home/ubuntu/go/bin:$PATH
make dev_docker
make -C bridge mock_abi
make -C bridge bridge
make -C bridge docker
make -C coordinator coordinator
make -C coordinator docker
'''
}
}
stage('Test') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
steps {
sh "docker ps -aq | xargs -r docker stop"
sh "docker container prune -f"
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
sh '''
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/migrate
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/docker
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/abi
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/l1
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/l2
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/sender
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/common/docker
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator/verifier
cd ..
'''
script {
for (i in ['bridge', 'coordinator', 'database']) {
sh "cd $i && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|l2\\|l1\\|common\\|coordinator')"
}
}
script { test_result = true }
}
}
}
stage('Docker') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
steps {
withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
script {
if (test_result == true) {
sh 'docker login --username=${dockerUser} --password=${dockerPassword}'
for (i in ['bridge', 'coordinator']) {
sh "docker build -t ${imagePrefix}/$i:${GIT_COMMIT} -f build/dockerfiles/${i}.Dockerfile ."
sh "docker push ${imagePrefix}/$i:${GIT_COMMIT}"
sh "docker rmi ${imagePrefix}/$i:${GIT_COMMIT}"
}
}
}
}
}
}
}
post {
always {
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
}
}
}


@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2022-2024 Scroll
Copyright (c) 2022 Scroll
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@@ -1,47 +1,33 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.6.3
.PHONY: check update dev_docker clean
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
update: ## Update dependencies
go work sync
cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
lint: ## The code's format and security checks
make -C rollup lint
lint: ## The code's format and security checks.
make -C bridge lint
make -C common lint
make -C coordinator lint
make -C database lint
make -C bridge-history-api lint
make -C roller lint
fmt: ## Format the code
update: ## update dependencies
go work sync
cd $(PWD)/bridge-history-api/ && go mod tidy
cd $(PWD)/bridge/ && go mod tidy
cd $(PWD)/common/ && go mod tidy
cd $(PWD)/coordinator/ && go mod tidy
cd $(PWD)/database/ && go mod tidy
cd $(PWD)/rollup/ && go mod tidy
cd $(PWD)/tests/integration-test/ && go mod tidy
goimports -local $(PWD)/bridge-history-api/ -w .
cd $(PWD)/roller/ && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/tests/integration-test/ -w .
goimports -local $(PWD)/roller/ -w .
dev_docker: ## Build docker images for development/testing usages
docker pull postgres
docker build -t scroll_l1geth --platform linux/amd64 ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth --platform linux/amd64 ./common/testcontainers/docker/l2geth/
dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/.
clean: ## Empty out the bin folder
@rm -rf build/bin


@@ -1,55 +1,3 @@
# Scroll Monorepo
[![rollup](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
[![bridge-history](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
[![coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
[![prover](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
[![integration](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml)
[![codecov](https://codecov.io/gh/scroll-tech/scroll/branch/develop/graph/badge.svg?token=VJVHNQWGGW)](https://codecov.io/gh/scroll-tech/scroll)
<a href="https://scroll.io">Scroll</a> is a zkRollup Layer 2 dedicated to enhance Ethereum scalability through a bytecode-equivalent [zkEVM](https://github.com/scroll-tech/zkevm-circuits) circuit. This monorepo encompasses essential infrastructure components of the Scroll protocol. It contains the L1 and L2 contracts, the rollup node, the prover client, and the prover coordinator.
## Directory Structure
<pre>
├── <a href="./bridge-history-api/">bridge-history-api</a>: Bridge history service that collects deposit and withdraw events from both L1 and L2 chains and generates withdrawal proofs
├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./rollup">rollup</a>: Rollup-related services
├── <a href="https://github.com/scroll-tech/scroll-contracts.git">scroll-contracts</a>: solidity code for Scroll L1 bridge and rollup contracts and L2 bridge and pre-deployed contracts.
└── <a href="./tests">tests</a>: Integration tests
</pre>
## Contributing
We welcome community contributions to this repository. Before you submit any issues or PRs, please read the [Code of Conduct](CODE_OF_CONDUCT.md) and the [Contribution Guideline](CONTRIBUTING.md).
## Prerequisites
+ Go 1.21
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Hardhat / Foundry
+ Docker
To run the tests, you must first pull or build the required Docker images. Execute the following commands in the root directory of the repository:
```bash
docker pull postgres
make dev_docker
```
## Unit Tests
Run the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## License
Scroll Monorepo is licensed under the [MIT](./LICENSE) license.
[![Contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [![Bridge](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [![Coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [![Database](https://github.com/scroll-tech/scroll/actions/workflows/database.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [![Common](https://github.com/scroll-tech/scroll/actions/workflows/common.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [![Roller](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)

View File

@@ -1 +0,0 @@
/build/bin

View File

@@ -1,42 +0,0 @@
.PHONY: lint
REPO_ROOT_DIR=./..
IMAGE_VERSION=latest
PWD=$(shell pwd)
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
bridgehistoryapi-db-cli:
go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli
bridgehistoryapi-fetcher:
go build -o $(PWD)/build/bin/bridgehistoryapi-fetcher ./cmd/fetcher
bridgehistoryapi-api:
go build -o $(PWD)/build/bin/bridgehistoryapi-api ./cmd/api
reset-env:
if docker ps -a -q -f name=bridgehistoryapi-redis | grep -q . ; then \
docker stop bridgehistoryapi-redis; \
docker rm bridgehistoryapi-redis; \
fi
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
if docker ps -a -q -f name=bridgehistoryapi-history-db | grep -q . ; then \
docker stop bridgehistoryapi-history-db; \
docker rm bridgehistoryapi-history-db; \
fi
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
until docker exec bridgehistoryapi-history-db pg_isready -h localhost -p 5432 -U postgres > /dev/null; do \
echo "Waiting for postgres to be ready..."; \
sleep 1; \
done
echo "Postgres is ready."
go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset
bridgehistoryapi-docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile

View File

@@ -1,128 +0,0 @@
# bridge-history-api
This directory contains the `bridge-history-api` service, which provides REST APIs for querying transactions that interact with Scroll's official bridge contracts
## Instructions
The bridge-history-api consists of three distinct components:
### bridgehistoryapi-db-cli
Provides init, version display, rollback, and status-check services for the database
```
cd ./bridge-history-api
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli [command]
```
### bridgehistoryapi-fetcher
Fetches bridge transactions from both L1 and L2
```
cd ./bridge-history-api
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```
### bridgehistoryapi-api
Provides REST APIs. Please refer to the API details below.
```
cd ./bridge-history-api
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```
## APIs provided by bridgehistoryapi-api
1. `/api/txs`
```
// @Summary get all txs under the given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/txs [get]
```
2. `/api/l2/withdrawals`
```
// @Summary get all L2 withdrawals under given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/l2/withdrawals [get]
```
3. `/api/l2/unclaimed/withdrawals`
```
// @Summary get all L2 unclaimed withdrawals under the given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/l2/unclaimed/withdrawals [get]
```
4. `/api/txsbyhashes`
```
// @Summary get txs by given tx hashes
// @Accept plain
// @Produce plain
// @Param hashes query string array true "array of hashes"
// @Success 200
// @Router /api/txsbyhashes [post]
```
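For illustration, here is a minimal Go sketch of calling the `/api/txs` endpoint from a client. The base URL and port are assumptions (the actual port is supplied via the service's port flag), and the response is decoded into a generic map because the schema is not spelled out above.
```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Assumed base URL; substitute the host/port your bridgehistoryapi-api instance listens on.
	base := "http://localhost:8080/api/txs"

	// Query parameters as documented above: wallet address, page size, and page number.
	params := url.Values{}
	params.Set("address", "0x0000000000000000000000000000000000000000")
	params.Set("page_size", "10")
	params.Set("page", "1")

	resp, err := http.Get(base + "?" + params.Encode())
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// The response schema is not specified above, so decode into a generic map.
	var body map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		log.Fatalf("decode failed: %v", err)
	}
	fmt.Printf("status: %s, body: %v\n", resp.Status, body)
}
```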
## Running bridge-history-api locally
1. Pull the latest Redis image:
```
docker pull redis:latest
```
2. Run the Redis container:
```
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
```
3. Pull the latest PostgreSQL image:
```
docker pull postgres:latest
```
4. Run the PostgreSQL container:
```
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
```
5. Run database migrations to initialize the tables:
```
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli migrate
```
6. Run bridgehistoryapi-fetcher:
```
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```
7. Run bridgehistoryapi-api:
```
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```
The endpoints provided in [./conf/config.json](./conf/config.json) are all public endpoints and have rate limits.
For production usage:
- For L1 endpoints, utilizing a service provider's free tier should suffice.
- For L2 endpoints, consider [running a Scroll L2geth node](https://docs.scroll.xyz/en/developers/guides/running-a-scroll-node) and using the exposed HTTP port (see the connectivity sketch below).
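Before pointing the fetcher at a custom endpoint, a quick sanity check can save a debugging round-trip. The sketch below is not part of the service; it simply dials the RPC URL with the same `ethclient` package the fetcher uses and reads the latest block number (assuming the scroll-tech fork keeps upstream go-ethereum's `BlockNumber` method). The endpoint value is an assumption to be replaced with your own.
```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	// Assumed endpoint; replace with the L1/L2 RPC URL you plan to put in conf/config.json.
	endpoint := "https://rpc.scroll.io"

	client, err := ethclient.Dial(endpoint)
	if err != nil {
		log.Fatalf("failed to dial %s: %v", endpoint, err)
	}
	defer client.Close()

	// A successful BlockNumber call confirms the endpoint is reachable and serving RPC.
	height, err := client.BlockNumber(context.Background())
	if err != nil {
		log.Fatalf("failed to fetch block number: %v", err)
	}
	fmt.Printf("endpoint %s is reachable, latest block: %d\n", endpoint, height)
}
```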

File diff suppressed because one or more lines are too long

View File

@@ -1,105 +0,0 @@
package app
import (
"crypto/tls"
"fmt"
"os"
"os/signal"
"time"
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/controller/api"
"scroll-tech/bridge-history-api/internal/route"
)
var app *cli.App
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "Scroll Bridge History API Web Service"
app.Usage = "The Scroll Bridge History API Web Service"
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db", "err", err)
}
defer func() {
if deferErr := database.CloseDB(db); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
opts := &redis.Options{
Addr: cfg.Redis.Address,
Username: cfg.Redis.Username,
Password: cfg.Redis.Password,
MinIdleConns: cfg.Redis.MinIdleConns,
ReadTimeout: time.Duration(cfg.Redis.ReadTimeoutMs * int(time.Millisecond)),
}
// Production Redis service has enabled transit_encryption.
if !cfg.Redis.Local {
opts.TLSConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
InsecureSkipVerify: true, //nolint:gosec
}
}
log.Info("init redis client", "addr", opts.Addr, "user name", opts.Username, "is local", cfg.Redis.Local,
"min idle connections", opts.MinIdleConns, "read timeout", opts.ReadTimeout)
redisClient := redis.NewClient(opts)
api.InitController(db, redisClient)
router := gin.Default()
registry := prometheus.DefaultRegisterer
route.Route(router, cfg, registry)
go func() {
port := ctx.Int(utils.ServicePortFlag.Name)
if runServerErr := router.Run(fmt.Sprintf(":%d", port)); runServerErr != nil {
log.Crit("run http server failure", "error", runServerErr)
}
}()
observability.Server(ctx, db)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run bridge-history-backend api cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge-history-api/cmd/api/app"
func main() {
app.Run()
}

View File

@@ -1,67 +0,0 @@
package app
import (
"fmt"
"os"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
)
var app *cli.App
func init() {
app = cli.NewApp()
app.Name = "db_cli"
app.Usage = "The Scroll Bridge-history-api DB CLI"
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
app.Commands = []*cli.Command{
{
Name: "reset",
Usage: "Clean and reset database.",
Action: resetDB,
},
{
Name: "status",
Usage: "Check migration status.",
Action: checkDBStatus,
},
{
Name: "version",
Usage: "Display the current database version.",
Action: dbVersion,
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: migrateDB,
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: rollbackDB,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",
Value: 0,
},
},
},
}
}
// Run database cmd instance.
func Run() {
// RunApp the db_cli.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,120 +0,0 @@
package app
import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/utils"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm/migrate"
)
func getConfig(ctx *cli.Context) (*config.Config, error) {
file := ctx.String(utils.ConfigFileFlag.Name)
dbCfg, err := config.NewConfig(file)
if err != nil {
return nil, err
}
return dbCfg, nil
}
func initDB(dbCfg *database.Config) (*gorm.DB, error) {
return database.InitDB(dbCfg)
}
// resetDB clean or reset database.
func resetDB(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
err = migrate.ResetDB(db)
if err != nil {
return err
}
log.Info("successful to reset")
return nil
}
// checkDBStatus check db status
func checkDBStatus(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
return migrate.Status(db)
}
// dbVersion return the latest version
func dbVersion(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
version, err := migrate.Current(db)
log.Info("show database version", "db version", version)
return err
}
// migrateDB migrate db
func migrateDB(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
return migrate.Migrate(db)
}
// rollbackDB rollback db by version
func rollbackDB(ctx *cli.Context) error {
cfg, err := getConfig(ctx)
if err != nil {
return err
}
gormDB, err := initDB(cfg.DB)
if err != nil {
return err
}
db, err := gormDB.DB()
if err != nil {
return err
}
version := ctx.Int64("version")
return migrate.Rollback(db, &version)
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge-history-api/cmd/db_cli/app"
func main() {
app.Run()
}

View File

@@ -1,93 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/controller/fetcher"
)
var app *cli.App
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "Scroll Bridge History API Message Fetcher"
app.Usage = "The Scroll Bridge History API Message Fetcher"
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
defer cancel()
l1Client, err := ethclient.Dial(cfg.L1.Endpoint)
if err != nil {
log.Crit("failed to connect to L1 geth", "endpoint", cfg.L1.Endpoint, "err", err)
}
l2Client, err := ethclient.Dial(cfg.L2.Endpoint)
if err != nil {
log.Crit("failed to connect to L2 geth", "endpoint", cfg.L2.Endpoint, "err", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db", "err", err)
}
defer func() {
if deferErr := database.CloseDB(db); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
if err != nil {
log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
}
observability.Server(ctx, db)
l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
go l1MessageFetcher.Start()
l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)
go l2MessageFetcher.Start()
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run bridge-history-backend fetcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/bridge-history-api/cmd/fetcher/app"
func main() {
app.Run()
}

View File

@@ -1,60 +0,0 @@
{
"L1": {
"confirmation": 0,
"endpoint": "https://rpc.ankr.com/eth",
"startHeight": 18306000,
"blockTime": 12,
"fetchLimit": 16,
"MessengerAddr": "0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367",
"ETHGatewayAddr": "0x7F2b8C31F88B6006c382775eea88297Ec1e3E905",
"WETHGatewayAddr": "0x7AC440cAe8EB6328de4fA621163a792c1EA9D4fE",
"StandardERC20GatewayAddr": "0xD8A791fE2bE73eb6E6cF1eb0cb3F36adC9B3F8f9",
"CustomERC20GatewayAddr": "0xb2b10a289A229415a124EFDeF310C10cb004B6ff",
"ERC721GatewayAddr": "0x6260aF48e8948617b8FA17F4e5CEa2d21D21554B",
"ERC1155GatewayAddr": "0xb94f7F6ABcb811c5Ac709dE14E37590fcCd975B6",
"USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
"LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
"PufferGatewayAddr": "0xA033Ff09f2da45f0e9ae495f525363722Df42b2a",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
"BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4",
"GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
"WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"L2": {
"confirmation": 0,
"endpoint": "https://rpc.scroll.io",
"blockTime": 3,
"fetchLimit": 64,
"MessengerAddr": "0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC",
"ETHGatewayAddr": "0x6EA73e05AdC79974B931123675ea8F78FfdacDF0",
"WETHGatewayAddr": "0x7003E7B7186f0E6601203b99F7B8DECBfA391cf9",
"StandardERC20GatewayAddr": "0xE2b4795039517653c5Ae8C2A9BFdd783b48f447A",
"CustomERC20GatewayAddr": "0x64CCBE37c9A82D85A1F2E74649b7A42923067988",
"ERC721GatewayAddr": "0x7bC08E1c04fb41d75F1410363F0c5746Eae80582",
"ERC1155GatewayAddr": "0x62597Cc19703aF10B58feF87B0d5D29eFE263bcc",
"USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
"LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
"DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0xa1a12158bE6269D7580C63eC5E609Cdc0ddD82bC"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",
"driverName": "postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"redis": {
"address": "localhost:6379",
"username": "default",
"password": "",
"local": true,
"minIdleConns": 10,
"readTimeoutMs": 500
}
}
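As a rough sketch of how this file is consumed, the snippet below loads it with the `config.NewConfig` helper and reads the same fields the fetcher and API commands above use (`cfg.L1.Endpoint`, `cfg.L2.Endpoint`, `cfg.Redis.Address`). The hard-coded path is an assumption; in practice the services receive it through the config-file CLI flag.
```go
package main

import (
	"fmt"
	"log"

	"scroll-tech/bridge-history-api/internal/config"
)

func main() {
	// Assumed path; the services normally get this from the config-file flag.
	cfg, err := config.NewConfig("./conf/config.json")
	if err != nil {
		log.Fatalf("failed to load config: %v", err)
	}

	// Fields referenced by the fetcher and API commands shown above.
	fmt.Println("L1 endpoint:", cfg.L1.Endpoint)
	fmt.Println("L2 endpoint:", cfg.L2.Endpoint)
	fmt.Println("redis address:", cfg.Redis.Address)
}
```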

View File

@@ -1,120 +0,0 @@
module scroll-tech/bridge-history-api
go 1.21
require (
github.com/gin-contrib/cors v1.5.0
github.com/gin-gonic/gin v1.9.1
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.7.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.10.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.15.5 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.16 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/gomega v1.27.1 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.12 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

View File

@@ -1,490 +0,0 @@
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0=
github.com/ClickHouse/ch-go v0.58.2/go.mod h1:Ap/0bEmiLa14gYjCiRkYGbXvbe8vwdrfTYWhsuQ99aw=
github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4=
github.com/ClickHouse/clickhouse-go/v2 v2.15.0/go.mod h1:kXt1SRq0PIRa6aKZD7TnFnY9PQKmc2b13sHtOYcK6cQ=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0=
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible h1:xhVCHXq+P5LhT31+RuDuk0xXEbEnd50Fr37J1bGuyWg=
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v26.1.0+incompatible h1:W1G9MPNbskA6VZWL7b3ZljTh0pXI68FpINx0GKaOdaM=
github.com/docker/docker v26.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T/Lao=
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s=
github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg=
github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw=
github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dzWP1Lu+A40W883dK/Mr3xyDSM/2MggS8GtHT0qgAnE=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg=
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
modernc.org/libc v1.32.0 h1:yXatHTrACp3WaKNRCoZwUK7qj5V8ep1XyY0ka4oYcNc=
modernc.org/libc v1.32.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8=
modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

View File

@@ -1,77 +0,0 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/common/utils"
)
// FetcherConfig is the configuration of the Layer1 or Layer2 fetcher.
type FetcherConfig struct {
Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"`
StartHeight uint64 `json:"startHeight"` // Can only be set to the contract deployment height, since message proofs must be updated from the very beginning.
BlockTime int64 `json:"blockTime"`
FetchLimit uint64 `json:"fetchLimit"`
MessengerAddr string `json:"MessengerAddr"`
ETHGatewayAddr string `json:"ETHGatewayAddr"`
StandardERC20GatewayAddr string `json:"StandardERC20GatewayAddr"`
CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
WETHGatewayAddr string `json:"WETHGatewayAddr"`
DAIGatewayAddr string `json:"DAIGatewayAddr"`
USDCGatewayAddr string `json:"USDCGatewayAddr"`
LIDOGatewayAddr string `json:"LIDOGatewayAddr"`
PufferGatewayAddr string `json:"PufferGatewayAddr"`
ERC721GatewayAddr string `json:"ERC721GatewayAddr"`
ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
ScrollChainAddr string `json:"ScrollChainAddr"`
GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"`
BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
GasTokenGatewayAddr string `json:"GasTokenGatewayAddr"`
WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`
}
// RedisConfig is the Redis configuration
type RedisConfig struct {
Address string `json:"address"`
Username string `json:"username"`
Password string `json:"password"`
DB int `json:"db"`
Local bool `json:"local"`
MinIdleConns int `json:"minIdleConns"`
ReadTimeoutMs int `json:"readTimeoutMs"`
}
// Config is the configuration of the bridge history backend
type Config struct {
L1 *FetcherConfig `json:"L1"`
L2 *FetcherConfig `json:"L2"`
DB *database.Config `json:"db"`
Redis *RedisConfig `json:"redis"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &Config{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_BRIDGE_HISTORY")
if err != nil {
return nil, err
}
return cfg, nil
}
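For orientation, a minimal usage sketch of this loader; the main wrapper and the config file path are assumptions, while NewConfig, Config.L1/Config.L2 and FetcherConfig.Endpoint are taken from the code above.

package main

import (
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/bridge-history-api/internal/config"
)

func main() {
	// Load the JSON file; values can still be overridden by environment variables
	// carrying the SCROLL_BRIDGE_HISTORY prefix (see OverrideConfigWithEnv above).
	cfg, err := config.NewConfig("./conf/config.json") // hypothetical path
	if err != nil {
		log.Crit("failed to load config", "err", err)
	}
	log.Info("config loaded", "l1 endpoint", cfg.L1.Endpoint, "l2 endpoint", cfg.L2.Endpoint)
}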

View File

@@ -1,34 +0,0 @@
package api
import (
"sync"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
)
var (
// TxsByAddressCtl is the TxsByAddressController instance
TxsByAddressCtl *TxsByAddressController
// TxsByHashesCtl is the TxsByHashesController instance
TxsByHashesCtl *TxsByHashesController
// L2UnclaimedWithdrawalsByAddressCtl is the L2UnclaimedWithdrawalsByAddressController instance
L2UnclaimedWithdrawalsByAddressCtl *L2UnclaimedWithdrawalsByAddressController
// L2WithdrawalsByAddressCtl is the L2WithdrawalsByAddressController instance
L2WithdrawalsByAddressCtl *L2WithdrawalsByAddressController
initControllerOnce sync.Once
)
// InitController initializes the API controllers with the database and Redis client
func InitController(db *gorm.DB, redis *redis.Client) {
initControllerOnce.Do(func() {
TxsByAddressCtl = NewTxsByAddressController(db, redis)
TxsByHashesCtl = NewTxsByHashesController(db, redis)
L2UnclaimedWithdrawalsByAddressCtl = NewL2UnclaimedWithdrawalsByAddressController(db, redis)
L2WithdrawalsByAddressCtl = NewL2WithdrawalsByAddressController(db, redis)
})
}
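A hedged wiring sketch for these singleton controllers; the route paths, the registerRoutes helper and the import path of the api package are assumptions, only InitController, the exported controller variables and their handler methods come from this diff.

package route

import (
	"github.com/gin-gonic/gin"
	"github.com/go-redis/redis/v8"
	"gorm.io/gorm"

	"scroll-tech/bridge-history-api/internal/controller/api" // assumed import path
)

// registerRoutes mounts the singleton controllers onto a gin engine.
func registerRoutes(r *gin.Engine, db *gorm.DB, redisClient *redis.Client) {
	api.InitController(db, redisClient)
	v1 := r.Group("/api")
	v1.GET("/txs", api.TxsByAddressCtl.GetTxsByAddress)
	v1.POST("/txsbyhashes", api.TxsByHashesCtl.PostQueryTxsByHashes)
	v1.GET("/l2/withdrawals", api.L2WithdrawalsByAddressCtl.GetL2WithdrawalsByAddress)
	v1.GET("/l2/unclaimed/withdrawals", api.L2UnclaimedWithdrawalsByAddressCtl.GetL2UnclaimedWithdrawalsByAddress)
}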

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// L2UnclaimedWithdrawalsByAddressController is the controller of GetL2UnclaimedWithdrawalsByAddress
type L2UnclaimedWithdrawalsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewL2UnclaimedWithdrawalsByAddressController creates a new L2UnclaimedWithdrawalsByAddressController
func NewL2UnclaimedWithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2UnclaimedWithdrawalsByAddressController {
return &L2UnclaimedWithdrawalsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetL2UnclaimedWithdrawalsByAddress handles the HTTP GET request for unclaimed L2 withdrawals by address
func (c *L2UnclaimedWithdrawalsByAddressController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// L2WithdrawalsByAddressController is the controller of GetL2WithdrawalsByAddress
type L2WithdrawalsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewL2WithdrawalsByAddressController creates a new L2WithdrawalsByAddressController
func NewL2WithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2WithdrawalsByAddressController {
return &L2WithdrawalsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetL2WithdrawalsByAddress handles the HTTP GET request for L2 withdrawals by address
func (c *L2WithdrawalsByAddressController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// TxsByAddressController is the controller of GetTxsByAddress
type TxsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewTxsByAddressController creates a new TxsByAddressController
func NewTxsByAddressController(db *gorm.DB, redisClient *redis.Client) *TxsByAddressController {
return &TxsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetTxsByAddress handles the HTTP GET request for txs by address
func (c *TxsByAddressController) GetTxsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// TxsByHashesController is the controller of PostQueryTxsByHashes
type TxsByHashesController struct {
historyLogic *logic.HistoryLogic
}
// NewTxsByHashesController creates a new TxsByHashesController
func NewTxsByHashesController(db *gorm.DB, redisClient *redis.Client) *TxsByHashesController {
return &TxsByHashesController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// PostQueryTxsByHashes queries txs by their hashes
func (c *TxsByHashesController) PostQueryTxsByHashes(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
return
}
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,165 +0,0 @@
package fetcher
import (
"context"
"math/big"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/utils"
)
// L1MessageFetcher fetches cross message events from L1 and saves them to the database.
type L1MessageFetcher struct {
ctx context.Context
cfg *config.FetcherConfig
client *ethclient.Client
l1SyncHeight uint64
l1LastSyncBlockHash common.Hash
eventUpdateLogic *logic.EventUpdateLogic
l1FetcherLogic *logic.L1FetcherLogic
l1MessageFetcherRunningTotal prometheus.Counter
l1MessageFetcherReorgTotal prometheus.Counter
l1MessageFetcherSyncHeight prometheus.Gauge
}
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
c := &L1MessageFetcher{
ctx: ctx,
cfg: cfg,
client: client,
eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client),
}
reg := prometheus.DefaultRegisterer
c.l1MessageFetcherRunningTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "L1_message_fetcher_running_total",
Help: "Current count of running L1 message fetcher instances.",
})
c.l1MessageFetcherReorgTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "L1_message_fetcher_reorg_total",
Help: "Total count of blockchain reorgs encountered by the L1 message fetcher.",
})
c.l1MessageFetcherSyncHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "L1_message_fetcher_sync_height",
Help: "Latest blockchain height the L1 message fetcher has synced with.",
})
return c
}
// Start starts the L1 message fetching process.
func (c *L1MessageFetcher) Start() {
messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
if dbErr != nil {
log.Crit("L1MessageFetcher start failed", "err", dbErr)
}
l1SyncHeight := messageSyncedHeight
if batchSyncedHeight > l1SyncHeight {
l1SyncHeight = batchSyncedHeight
}
if bridgeBatchDepositSyncedHeight > l1SyncHeight {
l1SyncHeight = bridgeBatchDepositSyncedHeight
}
if c.cfg.StartHeight > l1SyncHeight {
l1SyncHeight = c.cfg.StartHeight - 1
}
// Sync from an older block to prevent reorg during restart.
if l1SyncHeight < logic.L1ReorgSafeDepth {
l1SyncHeight = 0
} else {
l1SyncHeight -= logic.L1ReorgSafeDepth
}
header, err := c.client.HeaderByNumber(c.ctx, new(big.Int).SetUint64(l1SyncHeight))
if err != nil {
log.Crit("failed to get L1 header by number", "block number", l1SyncHeight, "err", err)
return
}
c.updateL1SyncHeight(l1SyncHeight, header.Hash())
log.Info("Start L1 message fetcher",
"message synced height", messageSyncedHeight,
"batch synced height", batchSyncedHeight,
"bridge batch deposit height", bridgeBatchDepositSyncedHeight,
"config start height", c.cfg.StartHeight,
"sync start height", c.l1SyncHeight+1,
)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
for {
select {
case <-c.ctx.Done():
tick.Stop()
return
case <-tick.C:
c.fetchAndSaveEvents(c.cfg.Confirmation)
}
}
}()
}
func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
startHeight := c.l1SyncHeight + 1
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
if rpcErr != nil {
log.Error("failed to get L1 block number", "confirmation", confirmation, "err", rpcErr)
return
}
log.Info("fetch and save missing L1 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
to := from + c.cfg.FetchLimit - 1
if to > endHeight {
to = endHeight
}
isReorg, resyncHeight, lastBlockHash, l1FetcherResult, fetcherErr := c.l1FetcherLogic.L1Fetcher(c.ctx, from, to, c.l1LastSyncBlockHash)
if fetcherErr != nil {
log.Error("failed to fetch L1 events", "from", from, "to", to, "err", fetcherErr)
return
}
if isReorg {
c.l1MessageFetcherReorgTotal.Inc()
log.Warn("L1 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
c.updateL1SyncHeight(resyncHeight, lastBlockHash)
c.l1MessageFetcherRunningTotal.Inc()
return
}
if insertUpdateErr := c.eventUpdateLogic.L1InsertOrUpdate(c.ctx, l1FetcherResult); insertUpdateErr != nil {
log.Error("failed to save L1 events", "from", from, "to", to, "err", insertUpdateErr)
return
}
c.updateL1SyncHeight(to, lastBlockHash)
c.l1MessageFetcherRunningTotal.Inc()
}
}
func (c *L1MessageFetcher) updateL1SyncHeight(height uint64, blockHash common.Hash) {
c.l1MessageFetcherSyncHeight.Set(float64(height))
c.l1LastSyncBlockHash = blockHash
c.l1SyncHeight = height
}
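A minimal startup sketch for this fetcher; the startL1Fetcher helper is hypothetical and the context, DB handle and config are assumed to come from the caller, while ethclient.Dial, NewL1MessageFetcher and Start match the code above (same imports as this file).

// startL1Fetcher is a hypothetical helper showing how the fetcher is wired up at startup.
func startL1Fetcher(ctx context.Context, cfg *config.Config, db *gorm.DB) error {
	client, err := ethclient.Dial(cfg.L1.Endpoint)
	if err != nil {
		return err
	}
	// Start computes the resume height, then runs its polling loop in a goroutine and returns.
	NewL1MessageFetcher(ctx, cfg.L1, db, client).Start()
	return nil
}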

View File

@@ -1,163 +0,0 @@
package fetcher
import (
"context"
"math/big"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/utils"
)
// L2MessageFetcher fetches cross message events from L2 and saves them to the database.
type L2MessageFetcher struct {
ctx context.Context
cfg *config.FetcherConfig
db *gorm.DB
client *ethclient.Client
l2SyncHeight uint64
l2LastSyncBlockHash common.Hash
eventUpdateLogic *logic.EventUpdateLogic
l2FetcherLogic *logic.L2FetcherLogic
l2MessageFetcherRunningTotal prometheus.Counter
l2MessageFetcherReorgTotal prometheus.Counter
l2MessageFetcherSyncHeight prometheus.Gauge
}
// NewL2MessageFetcher creates a new L2MessageFetcher instance.
func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
c := &L2MessageFetcher{
ctx: ctx,
cfg: cfg,
db: db,
client: client,
eventUpdateLogic: logic.NewEventUpdateLogic(db, false),
l2FetcherLogic: logic.NewL2FetcherLogic(cfg, db, client),
}
reg := prometheus.DefaultRegisterer
c.l2MessageFetcherRunningTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "L2_message_fetcher_running_total",
Help: "Current count of running L2 message fetcher instances.",
})
c.l2MessageFetcherReorgTotal = promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "L2_message_fetcher_reorg_total",
Help: "Total count of blockchain reorgs encountered by the L2 message fetcher.",
})
c.l2MessageFetcherSyncHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "L2_message_fetcher_sync_height",
Help: "Latest blockchain height the L2 message fetcher has synced with.",
})
return c
}
// Start starts the L2 message fetching process.
func (c *L2MessageFetcher) Start() {
l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
if dbErr != nil {
log.Crit("failed to get L2 cross message processed height", "err", dbErr)
return
}
l2SyncHeight := l2SentMessageSyncedHeight
if l2BridgeBatchDepositSyncedHeight > l2SyncHeight {
l2SyncHeight = l2BridgeBatchDepositSyncedHeight
}
// Sync from an older block to prevent reorg during restart.
if l2SyncHeight < logic.L2ReorgSafeDepth {
l2SyncHeight = 0
} else {
l2SyncHeight -= logic.L2ReorgSafeDepth
}
header, err := c.client.HeaderByNumber(c.ctx, new(big.Int).SetUint64(l2SyncHeight))
if err != nil {
log.Crit("failed to get L2 header by number", "block number", l2SyncHeight, "err", err)
return
}
c.updateL2SyncHeight(l2SyncHeight, header.Hash())
log.Info("Start L2 message fetcher", "l2 sent message synced height", l2SentMessageSyncedHeight,
"bridge batch deposit synced height", l2BridgeBatchDepositSyncedHeight, "sync start height", l2SyncHeight+1)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
for {
select {
case <-c.ctx.Done():
tick.Stop()
return
case <-tick.C:
c.fetchAndSaveEvents(c.cfg.Confirmation)
}
}
}()
}
func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
startHeight := c.l2SyncHeight + 1
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
if rpcErr != nil {
log.Error("failed to get L2 block number", "confirmation", confirmation, "err", rpcErr)
return
}
log.Info("fetch and save missing L2 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
to := from + c.cfg.FetchLimit - 1
if to > endHeight {
to = endHeight
}
isReorg, resyncHeight, lastBlockHash, l2FetcherResult, fetcherErr := c.l2FetcherLogic.L2Fetcher(c.ctx, from, to, c.l2LastSyncBlockHash)
if fetcherErr != nil {
log.Error("failed to fetch L2 events", "from", from, "to", to, "err", fetcherErr)
return
}
if isReorg {
c.l2MessageFetcherReorgTotal.Inc()
log.Warn("L2 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
c.updateL2SyncHeight(resyncHeight, lastBlockHash)
c.l2MessageFetcherRunningTotal.Inc()
return
}
if insertUpdateErr := c.eventUpdateLogic.L2InsertOrUpdate(c.ctx, l2FetcherResult); insertUpdateErr != nil {
log.Error("failed to save L2 events", "from", from, "to", to, "err", insertUpdateErr)
return
}
if updateErr := c.eventUpdateLogic.UpdateL2WithdrawMessageProofs(c.ctx, c.l2SyncHeight); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}
if updateErr := c.eventUpdateLogic.UpdateL2BridgeBatchDepositEvent(c.ctx, l2FetcherResult.BridgeBatchDepositMessage); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}
c.updateL2SyncHeight(to, lastBlockHash)
c.l2MessageFetcherRunningTotal.Inc()
}
}
func (c *L2MessageFetcher) updateL2SyncHeight(height uint64, blockHash common.Hash) {
c.l2MessageFetcherSyncHeight.Set(float64(height))
c.l2LastSyncBlockHash = blockHash
c.l2SyncHeight = height
}

View File

@@ -1,263 +0,0 @@
package logic
import (
"context"
"errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// EventUpdateLogic is the logic for inserting into and updating the database
type EventUpdateLogic struct {
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
bridgeBatchDepositEventOrm *orm.BridgeBatchDepositEvent
eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight prometheus.Gauge
eventUpdateLogicL2MessageNonceUpdateHeight prometheus.Gauge
}
// NewEventUpdateLogic creates an EventUpdateLogic instance
func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
b := &EventUpdateLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
bridgeBatchDepositEventOrm: orm.NewBridgeBatchDepositEvent(db),
}
if !isL1 {
reg := prometheus.DefaultRegisterer
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "event_update_logic_L1_finalize_batch_event_L2_block_update_height",
Help: "L2 block height of the latest L1 batch event that has been finalized and updated in the message_table.",
})
b.eventUpdateLogicL2MessageNonceUpdateHeight = promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "event_update_logic_L2_message_nonce_update_height",
Help: "L2 message nonce height in the latest L1 batch event that has been finalized and updated in the message_table.",
})
}
return b
}
// GetL1SyncHeight gets the L1 synced heights (messages, batch events and bridge batch deposits) from the database
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, uint64, error) {
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL1SentMessage)
if err != nil {
log.Error("failed to get L1 cross message synced height", "error", err)
return 0, 0, 0, err
}
batchSyncedHeight, err := b.batchEventOrm.GetBatchEventSyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get L1 batch event synced height", "error", err)
return 0, 0, 0, err
}
bridgeBatchDepositSyncedHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL1SyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get l1 bridge batch deposit synced height", "error", err)
return 0, 0, 0, err
}
return messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, nil
}
// GetL2MessageSyncedHeightInDB gets the synced heights of L2 sent messages and bridge batch deposits
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, uint64, error) {
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL2SentMessage)
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, 0, err
}
l2BridgeBatchDepositSyncHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL2SyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get bridge batch deposit processed height", "err", err)
return 0, 0, err
}
return l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncHeight, nil
}
// L1InsertOrUpdate inserts or updates L1 messages
func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult *L1FilterResult) error {
if err := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages); err != nil {
log.Error("failed to insert L1 deposit messages", "err", err)
return err
}
if err := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages); err != nil {
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", err)
return err
}
if err := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents); err != nil {
log.Error("failed to insert or update batch events", "err", err)
return err
}
if err := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents); err != nil {
log.Error("failed to insert L1 message queue events", "err", err)
return err
}
if err := b.crossMessageOrm.InsertFailedL1GatewayTxs(ctx, l1FetcherResult.RevertedTxs); err != nil {
log.Error("failed to insert failed L1 gateway transactions", "err", err)
return err
}
if err := b.bridgeBatchDepositEventOrm.InsertOrUpdateL1BridgeBatchDepositEvent(ctx, l1FetcherResult.BridgeBatchDepositEvents); err != nil {
log.Error("failed to insert L1 bridge batch deposit transactions", "err", err)
return err
}
return nil
}
func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, batchIndex, startBlock, endBlock uint64) error {
if startBlock > endBlock {
log.Warn("start block is greater than end block", "start", startBlock, "end", endBlock)
return nil
}
l2WithdrawMessages, err := b.crossMessageOrm.GetL2WithdrawalsByBlockRange(ctx, startBlock, endBlock)
if err != nil {
log.Error("failed to get L2 withdrawals by batch index", "batch index", batchIndex, "err", err)
return err
}
if len(l2WithdrawMessages) == 0 {
return nil
}
withdrawTrie := utils.NewWithdrawTrie()
lastMessage, err := b.crossMessageOrm.GetL2LatestFinalizedWithdrawal(ctx)
if err != nil {
log.Error("failed to get latest L2 finalized sent message event", "err", err)
return err
}
if lastMessage != nil {
withdrawTrie.Initialize(lastMessage.MessageNonce, common.HexToHash(lastMessage.MessageHash), lastMessage.MerkleProof)
}
if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce {
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actual next message nonce", l2WithdrawMessages[0].MessageNonce)
return errors.New("nonce mismatch")
}
messageHashes := make([]common.Hash, len(l2WithdrawMessages))
for i, message := range l2WithdrawMessages {
messageHashes[i] = common.HexToHash(message.MessageHash)
}
proofs := withdrawTrie.AppendMessages(messageHashes)
for i, message := range l2WithdrawMessages {
message.MerkleProof = proofs[i]
message.RollupStatus = int(btypes.RollupStatusTypeFinalized)
message.BatchIndex = batchIndex
}
if dbErr := b.crossMessageOrm.UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx, l2WithdrawMessages); dbErr != nil {
log.Error("failed to update batch index and rollup status and merkle proof of L2 messages", "err", dbErr)
return dbErr
}
b.eventUpdateLogicL2MessageNonceUpdateHeight.Set(float64(withdrawTrie.NextMessageNonce - 1))
return nil
}
// UpdateL2WithdrawMessageProofs updates L2 withdrawal message proofs.
func (b *EventUpdateLogic) UpdateL2WithdrawMessageProofs(ctx context.Context, height uint64) error {
lastUpdatedFinalizedBlockHeight, err := b.batchEventOrm.GetLastUpdatedFinalizedBlockHeight(ctx)
if err != nil {
log.Error("failed to get last updated finalized block height", "error", err)
return err
}
finalizedBatches, err := b.batchEventOrm.GetUnupdatedFinalizedBatchesLEBlockHeight(ctx, height)
if err != nil {
log.Error("failed to get unupdated finalized batches >= block height", "error", err)
return err
}
for _, finalizedBatch := range finalizedBatches {
log.Info("update finalized batch or bundle info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
// This method is compatible with both "finalize by batch" and "finalize by bundle" modes:
// - In "finalize by batch" mode, each batch emits a FinalizedBatch event.
// - In "finalize by bundle" mode, all batches in the bundle emit only one FinalizedBatch event, using the last batch's index and hash.
//
// The method updates two types of information in L2 withdrawal messages:
// 1. Withdraw proof generation:
// - finalize by batch: Generates proofs for each batch.
// - finalize by bundle: Generates proofs for the entire bundle at once.
// 2. Batch index updating:
// - finalize by batch: Updates the batch index for withdrawal messages in each processed batch.
// - finalize by bundle: Updates the batch index for all withdrawal messages in the bundle, using the index of the last batch in the bundle.
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, lastUpdatedFinalizedBlockHeight+1, finalizedBatch.EndBlockNumber); updateErr != nil {
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
return updateErr
}
if dbErr := b.batchEventOrm.UpdateBatchEventStatus(ctx, finalizedBatch.BatchIndex); dbErr != nil {
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
return dbErr
}
lastUpdatedFinalizedBlockHeight = finalizedBatch.EndBlockNumber
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight.Set(float64(finalizedBatch.EndBlockNumber))
}
return nil
}
// UpdateL2BridgeBatchDepositEvent updates the L2 bridge batch deposit status
func (b *EventUpdateLogic) UpdateL2BridgeBatchDepositEvent(ctx context.Context, l2BatchDistributes []*orm.BridgeBatchDepositEvent) error {
distributeFailedMap := make(map[uint64][]string)
for _, l2BatchDistribute := range l2BatchDistributes {
if btypes.TxStatusType(l2BatchDistribute.TxStatus) == btypes.TxStatusBridgeBatchDistributeFailed {
distributeFailedMap[l2BatchDistribute.BatchIndex] = append(distributeFailedMap[l2BatchDistribute.BatchIndex], l2BatchDistribute.Sender)
}
if err := b.bridgeBatchDepositEventOrm.UpdateBatchEventStatus(ctx, l2BatchDistribute); err != nil {
log.Error("failed to update L1 bridge batch distribute event", "batchIndex", l2BatchDistribute.BatchIndex, "err", err)
return err
}
}
for batchIndex, distributeFailedSenders := range distributeFailedMap {
if err := b.bridgeBatchDepositEventOrm.UpdateDistributeFailedStatus(ctx, batchIndex, distributeFailedSenders); err != nil {
log.Error("failed to update L1 bridge batch distribute failed event", "batchIndex", batchIndex, "failed senders", distributeFailedSenders, "err", err)
return err
}
}
return nil
}
// L2InsertOrUpdate inserts or updates L2 messages
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
if err := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages); err != nil {
log.Error("failed to insert L2 withdrawal messages", "err", err)
return err
}
if err := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages); err != nil {
log.Error("failed to update L2 relayed messages of L1 deposits", "err", err)
return err
}
if err := b.crossMessageOrm.InsertFailedL2GatewayTxs(ctx, l2FetcherResult.OtherRevertedTxs); err != nil {
log.Error("failed to insert failed L2 gateway transactions", "err", err)
return err
}
return nil
}
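To make the proof step above concrete, here is a small illustrative sketch of the withdraw-trie usage in updateL2WithdrawMessageInfos; the two message hashes are made up, and only utils.NewWithdrawTrie, AppendMessages and NextMessageNonce are assumed, exactly as used above.

// Illustrative only: append two pending withdrawal hashes and get one Merkle proof per message.
withdrawTrie := utils.NewWithdrawTrie()
messageHashes := []common.Hash{
	common.HexToHash("0x01"), // hypothetical withdrawal message hashes
	common.HexToHash("0x02"),
}
proofs := withdrawTrie.AppendMessages(messageHashes)
// proofs[i] is what gets stored into l2WithdrawMessages[i].MerkleProof above, and
// withdrawTrie.NextMessageNonce now points at the nonce expected from the next withdrawal.
_ = proofs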

View File

@@ -1,454 +0,0 @@
package logic
import (
"context"
"encoding/json"
"errors"
"reflect"
"strconv"
"time"
"github.com/go-redis/redis/v8"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/singleflight"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/types"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
const (
// cacheKeyPrefixBridgeHistory serves as a specific namespace for all Redis cache keys
// associated with the 'bridge-history' user. This prefix is used to enforce access controls
// in Redis, allowing permissions to be set such that only users with the appropriate
// access rights can read or write to keys starting with "bridge-history".
cacheKeyPrefixBridgeHistory = "bridge-history-"
cacheKeyPrefixL2ClaimableWithdrawalsByAddr = cacheKeyPrefixBridgeHistory + "l2ClaimableWithdrawalsByAddr:"
cacheKeyPrefixL2WithdrawalsByAddr = cacheKeyPrefixBridgeHistory + "l2WithdrawalsByAddr:"
cacheKeyPrefixTxsByAddr = cacheKeyPrefixBridgeHistory + "txsByAddr:"
cacheKeyPrefixQueryTxsByHashes = cacheKeyPrefixBridgeHistory + "queryTxsByHashes:"
cacheKeyExpiredTime = 1 * time.Minute
)
// HistoryLogic provides the bridge history query services.
type HistoryLogic struct {
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
bridgeBatchDepositOrm *orm.BridgeBatchDepositEvent
redis *redis.Client
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
}
// NewHistoryLogic returns a new HistoryLogic instance.
func NewHistoryLogic(db *gorm.DB, redis *redis.Client) *HistoryLogic {
logic := &HistoryLogic{
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
bridgeBatchDepositOrm: orm.NewBridgeBatchDepositEvent(db),
redis: redis,
cacheMetrics: initCacheMetrics(),
}
return logic
}
// GetL2UnclaimedWithdrawalsByAddress gets all unclaimed withdrawal txs under given address.
func (h *HistoryLogic) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
cacheKey := cacheKeyPrefixL2ClaimableWithdrawalsByAddr + address
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err != nil {
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
if isHit {
h.cacheMetrics.cacheHits.WithLabelValues("GetL2UnclaimedWithdrawalsByAddress").Inc()
log.Info("cache hit", "cache key", cacheKey)
return pagedTxs, total, nil
}
h.cacheMetrics.cacheMisses.WithLabelValues("GetL2UnclaimedWithdrawalsByAddress").Inc()
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
return txHistoryInfos, nil
})
if err != nil {
log.Error("failed to get L2 claimable withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
}
// GetL2WithdrawalsByAddress gets all withdrawal txs under given address.
func (h *HistoryLogic) GetL2WithdrawalsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
cacheKey := cacheKeyPrefixL2WithdrawalsByAddr + address
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err != nil {
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
if isHit {
h.cacheMetrics.cacheHits.WithLabelValues("GetL2WithdrawalsByAddress").Inc()
log.Info("cache hit", "cache key", cacheKey)
return pagedTxs, total, nil
}
h.cacheMetrics.cacheMisses.WithLabelValues("GetL2WithdrawalsByAddress").Inc()
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
return txHistoryInfos, nil
})
if err != nil {
log.Error("failed to get L2 withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
}
// GetTxsByAddress gets tx infos under given address.
func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
cacheKey := cacheKeyPrefixTxsByAddr + address
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err != nil {
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
if isHit {
h.cacheMetrics.cacheHits.WithLabelValues("GetTxsByAddress").Inc()
log.Info("cache hit", "cache key", cacheKey)
return pagedTxs, total, nil
}
h.cacheMetrics.cacheMisses.WithLabelValues("GetTxsByAddress").Inc()
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetTxsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
batchDepositMessages, getErr := h.bridgeBatchDepositOrm.GetTxsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
for _, message := range batchDepositMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
}
return txHistoryInfos, nil
})
if err != nil {
log.Error("failed to get txs by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
}
// GetTxsByHashes gets tx infos under given tx hashes.
func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([]*types.TxHistoryInfo, error) {
hashesMap := make(map[string]struct{}, len(txHashes))
results := make([]*types.TxHistoryInfo, 0, len(txHashes))
uncachedHashes := make([]string, 0, len(txHashes))
for _, hash := range txHashes {
if _, exists := hashesMap[hash]; exists {
// Skip duplicate tx hash values.
continue
}
hashesMap[hash] = struct{}{}
cacheKey := cacheKeyPrefixQueryTxsByHashes + hash
cachedData, err := h.redis.Get(ctx, cacheKey).Bytes()
if err != nil && errors.Is(err, redis.Nil) {
h.cacheMetrics.cacheMisses.WithLabelValues("PostQueryTxsByHashes").Inc()
log.Info("cache miss", "cache key", cacheKey)
uncachedHashes = append(uncachedHashes, hash)
continue
}
if err != nil {
log.Error("failed to get data from Redis", "error", err)
uncachedHashes = append(uncachedHashes, hash)
continue
}
h.cacheMetrics.cacheHits.WithLabelValues("PostQueryTxsByHashes").Inc()
log.Info("cache hit", "cache key", cacheKey)
if len(cachedData) == 0 {
continue
}
var txInfo types.TxHistoryInfo
if unmarshalErr := json.Unmarshal(cachedData, &txInfo); unmarshalErr != nil {
log.Error("failed to unmarshal cached data", "error", unmarshalErr)
uncachedHashes = append(uncachedHashes, hash)
continue
}
results = append(results, &txInfo)
}
if len(uncachedHashes) > 0 {
var txHistories []*types.TxHistoryInfo
crossMessages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get cross messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
for _, message := range crossMessages {
txHistories = append(txHistories, getTxHistoryInfoFromCrossMessage(message))
}
batchDepositMessages, err := h.bridgeBatchDepositOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get batch deposit messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
for _, message := range batchDepositMessages {
txHistories = append(txHistories, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
}
resultMap := make(map[string]*types.TxHistoryInfo)
for _, result := range txHistories {
results = append(results, result)
resultMap[result.Hash] = result
}
for _, hash := range uncachedHashes {
cacheKey := cacheKeyPrefixQueryTxsByHashes + hash
result, found := resultMap[hash]
if !found {
// tx hash not found, which is also a valid result, cache empty string.
if cacheErr := h.redis.Set(ctx, cacheKey, "", cacheKeyExpiredTime).Err(); cacheErr != nil {
log.Error("failed to set data to Redis", "error", cacheErr)
}
continue
}
jsonData, marshalErr := json.Marshal(result)
if marshalErr != nil {
log.Error("failed to marshal data", "error", marshalErr)
continue
}
if cacheErr := h.redis.Set(ctx, cacheKey, jsonData, cacheKeyExpiredTime).Err(); cacheErr != nil {
log.Error("failed to set data to Redis", "error", cacheErr)
}
}
}
return results, nil
}
func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
MessageHash: message.MessageHash,
TokenType: btypes.TokenType(message.TokenType),
TokenIDs: utils.ConvertStringToStringArray(message.TokenIDs),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmounts),
L1TokenAddress: message.L1TokenAddress,
L2TokenAddress: message.L2TokenAddress,
MessageType: btypes.MessageType(message.MessageType),
TxStatus: btypes.TxStatusType(message.TxStatus),
BlockTimestamp: message.BlockTimestamp,
}
if txHistory.MessageType == btypes.MessageTypeL1SentMessage {
txHistory.Hash = message.L1TxHash
txHistory.ReplayTxHash = message.L1ReplayTxHash
txHistory.RefundTxHash = message.L1RefundTxHash
txHistory.BlockNumber = message.L1BlockNumber
txHistory.CounterpartChainTx = &types.CounterpartChainTx{
Hash: message.L2TxHash,
BlockNumber: message.L2BlockNumber,
}
} else {
txHistory.Hash = message.L2TxHash
txHistory.BlockNumber = message.L2BlockNumber
txHistory.CounterpartChainTx = &types.CounterpartChainTx{
Hash: message.L1TxHash,
BlockNumber: message.L1BlockNumber,
}
if btypes.RollupStatusType(message.RollupStatus) == btypes.RollupStatusTypeFinalized {
txHistory.ClaimInfo = &types.ClaimInfo{
From: message.MessageFrom,
To: message.MessageTo,
Value: message.MessageValue,
Nonce: strconv.FormatUint(message.MessageNonce, 10),
Message: message.MessageData,
Proof: types.L2MessageProof{
BatchIndex: strconv.FormatUint(message.BatchIndex, 10),
MerkleProof: "0x" + common.Bytes2Hex(message.MerkleProof),
},
Claimable: true,
}
}
}
return txHistory
}
func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepositEvent) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
Hash: message.L1TxHash,
TokenType: btypes.TokenType(message.TokenType),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmount),
BlockNumber: message.L1BlockNumber,
MessageType: btypes.MessageTypeL1BatchDeposit,
TxStatus: btypes.TxStatusType(message.TxStatus),
CounterpartChainTx: &types.CounterpartChainTx{
Hash: message.L2TxHash,
BlockNumber: message.L2BlockNumber,
},
BlockTimestamp: message.BlockTimestamp,
BatchDepositFee: message.Fee,
}
if txHistory.TokenType != btypes.TokenTypeETH {
txHistory.L1TokenAddress = message.L1TokenAddress
txHistory.L2TokenAddress = message.L2TokenAddress
}
return txHistory
}
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
start := int64((pageNum - 1) * pageSize)
end := start + int64(pageSize) - 1
total, err := h.redis.ZCard(ctx, cacheKey).Result()
if err != nil {
log.Error("failed to get zcard result", "error", err)
return nil, 0, false, err
}
if total == 0 {
return nil, 0, false, nil
}
values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
if err != nil {
log.Error("failed to get zrange result", "error", err)
return nil, 0, false, err
}
if len(values) == 0 {
return nil, 0, false, nil
}
// Check whether this is the empty-page placeholder.
if len(values) == 1 && values[0] == "empty_page" {
return nil, 0, true, nil
}
var pagedTxs []*types.TxHistoryInfo
for _, v := range values {
var tx types.TxHistoryInfo
if unmarshalErr := json.Unmarshal([]byte(v), &tx); unmarshalErr != nil {
log.Error("failed to unmarshal transaction data", "error", unmarshalErr)
return nil, 0, false, unmarshalErr
}
pagedTxs = append(pagedTxs, &tx)
}
return pagedTxs, uint64(total), true, nil
}
func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []*types.TxHistoryInfo) error {
_, err := h.redis.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
if len(txs) == 0 {
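// Cache an explicit placeholder for empty result sets so repeated queries for the same key do not fall through to the database.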
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: 0, Member: "empty_page"}).Err(); err != nil {
log.Error("failed to add empty page indicator to sorted set", "error", err)
return err
}
} else {
// The transactions are ordered by block timestamp, which is used as the sorted-set score so pagination follows time order.
for _, tx := range txs {
txBytes, err := json.Marshal(tx)
if err != nil {
log.Error("failed to marshal transaction to json", "error", err)
return err
}
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(tx.BlockTimestamp), Member: txBytes}).Err(); err != nil {
log.Error("failed to add transaction to sorted set", "error", err)
return err
}
}
}
if err := pipe.Expire(ctx, cacheKey, cacheKeyExpiredTime).Err(); err != nil {
log.Error("failed to set expiry time", "error", err)
return err
}
return nil
})
if err != nil {
log.Error("failed to execute transaction", "error", err)
return err
}
return nil
}
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, txHistories []*types.TxHistoryInfo, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
err := h.cacheTxsInfo(ctx, cacheKey, txHistories)
if err != nil {
log.Error("failed to cache txs info", "key", cacheKey, "err", err)
return nil, 0, err
}
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err != nil {
log.Error("failed to get cached tx info", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
if !isHit {
log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
return pagedTxs, total, nil
}
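// Illustrative sketch, not part of the original diff: how the helpers above are meant to
// compose into a read-through cache. Names below that do not appear in this file
// (getTxsReadThroughSketch, the DB-loading step) are hypothetical.
func (h *HistoryLogic) getTxsReadThroughSketch(ctx context.Context, cacheKey string, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
// 1. Try the Redis sorted set first.
pagedTxs, total, isHit, err := h.getCachedTxsInfo(ctx, cacheKey, page, pageSize)
if err == nil && isHit {
return pagedTxs, total, nil
}
// 2. On a miss, load the full history from the database (omitted here), then write it
// back to Redis and page through the sorted set in one step.
var txHistories []*types.TxHistoryInfo // would be populated from the ORM in a real handler
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistories, page, pageSize)
}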

View File

@@ -1,40 +0,0 @@
package logic
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type cacheMetrics struct {
cacheHits *prometheus.CounterVec
cacheMisses *prometheus.CounterVec
}
var (
initMetricsOnce sync.Once
cm *cacheMetrics
)
func initCacheMetrics() *cacheMetrics {
initMetricsOnce.Do(func() {
cm = &cacheMetrics{
cacheHits: promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "bridge_history_api_cache_hits_total",
Help: "The total number of cache hits",
},
[]string{"api"},
),
cacheMisses: promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "bridge_history_api_cache_misses_total",
Help: "The total number of cache misses",
},
[]string{"api"},
),
}
})
return cm
}

View File

@@ -1,391 +0,0 @@
package logic
import (
"context"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// L1EventParser the L1 event parser
type L1EventParser struct {
cfg *config.FetcherConfig
client *ethclient.Client
}
// NewL1EventParser creates the L1 event parser
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
return &L1EventParser{
cfg: cfg,
client: client,
}
}
// ParseL1CrossChainEventLogs parses L1 cross-chain event logs
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
l1CrossChainDepositMessages, l1CrossChainRelayedMessages, err := e.ParseL1SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
l1BridgeBatchDepositMessages, err := e.ParseL1BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
return l1CrossChainDepositMessages, l1CrossChainRelayedMessages, l1BridgeBatchDepositMessages, nil
}
// ParseL1BridgeBatchDepositCrossChainEventLogs parses L1 watched bridge batch deposit cross-chain events.
func (e *L1EventParser) ParseL1BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
var l1BridgeBatchDepositMessages []*orm.BridgeBatchDepositEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1BridgeBatchDepositSig:
event := backendabi.L1BatchBridgeGatewayDeposit{}
if err := utils.UnpackLog(backendabi.L1BatchBridgeGatewayABI, &event, "Deposit", vlog); err != nil {
log.Error("Failed to unpack batch bridge gateway deposit event", "err", err)
return nil, err
}
var tokenType btypes.TokenType
if event.Token == common.HexToAddress("0") {
tokenType = btypes.TokenTypeETH
} else {
tokenType = btypes.TokenTypeERC20
}
l1BridgeBatchDepositMessages = append(l1BridgeBatchDepositMessages, &orm.BridgeBatchDepositEvent{
TokenType: int(tokenType),
Sender: event.Sender.String(),
BatchIndex: event.BatchIndex.Uint64(),
TokenAmount: event.Amount.String(),
Fee: event.Fee.String(),
L1TokenAddress: event.Token.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDeposit),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L1LogIndex: vlog.Index,
})
}
}
return l1BridgeBatchDepositMessages, nil
}
// ParseL1SingleCrossChainEventLogs parses L1 watched single cross chain events.
func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l1DepositMessages []*orm.CrossMessage
var l1RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
event := backendabi.ETHMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ETHGatewayABI, &event, "DepositETH", vlog); err != nil {
log.Error("Failed to unpack DepositETH event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.IL1ERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Error("Failed to unpack DepositERC20 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "DepositERC721", vlog); err != nil {
log.Error("Failed to unpack DepositERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
case backendabi.L1BatchDepositERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "BatchDepositERC721", vlog); err != nil {
log.Error("Failed to unpack BatchDepositERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "DepositERC1155", vlog); err != nil {
log.Error("Failed to unpack DepositERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog); err != nil {
log.Error("Failed to unpack BatchDepositERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
case backendabi.L1DepositWrappedTokenSig:
event := backendabi.WrappedTokenMessageEvent{}
if err := utils.UnpackLog(backendabi.L1WrappedTokenGatewayABI, &event, "DepositWrappedToken", vlog); err != nil {
log.Error("Failed to unpack DepositWrappedToken event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
case backendabi.L1SentMessageEventSig:
event := backendabi.L1SentMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
log.Error("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
from, err := getRealFromAddress(ctx, event.Sender, event.Message, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
if err != nil {
log.Error("Failed to get real 'from' address", "err", err)
return nil, nil, err
}
l1DepositMessages = append(l1DepositMessages, &orm.CrossMessage{
L1BlockNumber: vlog.BlockNumber,
Sender: from,
Receiver: event.Target.String(),
TokenType: int(btypes.TokenTypeETH),
L1TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageType: int(btypes.MessageTypeL1SentMessage),
TxStatus: int(btypes.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
})
case backendabi.L1RelayedMessageEventSig:
event := backendabi.L1RelayedMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "RelayedMessage", vlog); err != nil {
log.Error("Failed to unpack RelayedMessage event", "err", err)
return nil, nil, err
}
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeRelayed),
MessageType: int(btypes.MessageTypeL2SentMessage),
})
case backendabi.L1FailedRelayedMessageEventSig:
event := backendabi.L1FailedRelayedMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "FailedRelayedMessage", vlog); err != nil {
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
return nil, nil, err
}
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
MessageType: int(btypes.MessageTypeL2SentMessage),
})
}
}
return l1DepositMessages, l1RelayedMessages, nil
}
// ParseL1BatchEventLogs parses L1 watched batch events.
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1CommitBatchEventSig:
event := backendabi.L1CommitBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "CommitBatch", vlog); err != nil {
log.Error("Failed to unpack CommitBatch event", "err", err)
return nil, err
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
}
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeCommitted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
StartBlockNumber: startBlock,
EndBlockNumber: endBlock,
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1RevertBatchEventSig:
event := backendabi.L1RevertBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
log.Error("Failed to unpack RevertBatch event", "err", err)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeReverted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1FinalizeBatchEventSig:
event := backendabi.L1FinalizeBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
log.Error("Failed to unpack FinalizeBatch event", "err", err)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeFinalized),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
}
}
return l1BatchEvents, nil
}
// ParseL1MessageQueueEventLogs parses L1 watched message queue events.
func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1DepositMessages []*orm.CrossMessage) ([]*orm.MessageQueueEvent, error) {
messageHashes := make(map[common.Hash]struct{})
for _, msg := range l1DepositMessages {
messageHashes[common.HexToHash(msg.MessageHash)] = struct{}{}
}
var l1MessageQueueEvents []*orm.MessageQueueEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1QueueTransactionEventSig:
event := backendabi.L1QueueTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "QueueTransaction", vlog); err != nil {
log.Error("Failed to unpack QueueTransaction event", "err", err)
return nil, err
}
messageHash := common.BytesToHash(crypto.Keccak256(event.Data))
// If the message hash is not found in the map, the QueueTransaction did not come from a regular sendMessage deposit (those are omitted); it is a replayMessage or an enforced tx, so add it to the events.
if _, exists := messageHashes[messageHash]; !exists {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeQueueTransaction,
QueueIndex: event.QueueIndex,
MessageHash: messageHash,
TxHash: vlog.TxHash,
})
}
case backendabi.L1DequeueTransactionEventSig:
event := backendabi.L1DequeueTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DequeueTransaction", vlog); err != nil {
log.Error("Failed to unpack DequeueTransaction event", "err", err)
return nil, err
}
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
for _, index := range skippedIndices {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeDequeueTransaction,
QueueIndex: index,
})
}
case backendabi.L1ResetDequeuedTransactionEventSig:
event := backendabi.L1ResetDequeuedTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "ResetDequeuedTransaction", vlog); err != nil {
log.Error("Failed to unpack ResetDequeuedTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeResetDequeuedTransaction,
QueueIndex: event.StartIndex.Uint64(),
})
case backendabi.L1DropTransactionEventSig:
event := backendabi.L1DropTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
log.Error("Failed to unpack DropTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeDropTransaction,
QueueIndex: event.Index.Uint64(),
TxHash: vlog.TxHash,
})
}
}
return l1MessageQueueEvents, nil
}
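// Illustrative sketch, not part of the original diff: a plausible reading of
// utils.GetSkippedQueueIndices used above, assuming bit i of the bitmap marks queue index
// startIndex+i as skipped. The semantics here are an assumption for illustration only;
// the real helper lives in the utils package.
func skippedQueueIndicesSketch(startIndex uint64, skippedBitmap *big.Int) []uint64 {
var indices []uint64
for i := 0; i < skippedBitmap.BitLen(); i++ {
if skippedBitmap.Bit(i) == 1 {
indices = append(indices, startIndex+uint64(i))
}
}
return indices
}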
func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMessage []byte, client *ethclient.Client, txHash common.Hash, gatewayRouterAddr string) (string, error) {
if eventSender != common.HexToAddress(gatewayRouterAddr) {
return eventSender.String(), nil
}
// deposit/withdraw ETH: EOA -> contract 1 -> ... -> contract n -> gateway router -> messenger.
if len(eventMessage) >= 32 {
addressBytes := eventMessage[32-common.AddressLength : 32]
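// The first 32-byte ABI word is 12 bytes of zero padding followed by the 20-byte sender address.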
var address common.Address
address.SetBytes(addressBytes)
return address.Hex(), nil
}
log.Warn("event message data too short to contain an address", "length", len(eventMessage))
// Legacy handling for messages shorter than 32 bytes, kept for backward compatibility until the next contract upgrade.
tx, isPending, rpcErr := client.TransactionByHash(ctx, txHash)
if rpcErr != nil || isPending {
log.Error("Failed to get transaction or the transaction is still pending", "rpcErr", rpcErr, "isPending", isPending)
return "", rpcErr
}
// Case 1: deposit/withdraw ETH: EOA -> multisig -> gateway router -> messenger.
if tx.To() != nil && (*tx.To()).String() != gatewayRouterAddr {
return (*tx.To()).String(), nil
}
// Case 2: deposit/withdraw ETH: EOA -> gateway router -> messenger.
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, err := signer.Sender(tx)
if err != nil {
log.Error("Get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", err)
return "", err
}
return sender.String(), nil
}

View File

@@ -1,373 +0,0 @@
package logic
import (
"context"
"math/big"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// L1ReorgSafeDepth represents the number of block confirmations considered safe against L1 chain reorganizations.
// Reorganizations at this depth under normal cases are extremely unlikely.
const L1ReorgSafeDepth = 64
// L1FilterResult L1 fetcher result
type L1FilterResult struct {
DepositMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage
BatchEvents []*orm.BatchEvent
MessageQueueEvents []*orm.MessageQueueEvent
RevertedTxs []*orm.CrossMessage
BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
}
// L1FetcherLogic the L1 fetcher logic
type L1FetcherLogic struct {
cfg *config.FetcherConfig
client *ethclient.Client
addressList []common.Address
gatewayList []common.Address
parser *L1EventParser
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
l1FetcherLogicFetchedTotal *prometheus.CounterVec
}
// NewL1FetcherLogic creates the L1 fetcher logic
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
common.HexToAddress(cfg.ScrollChainAddr),
common.HexToAddress(cfg.MessageQueueAddr),
}
gatewayList := []common.Address{
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
}
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
if common.HexToAddress(cfg.ETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.ETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.ETHGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
if common.HexToAddress(cfg.GasTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.GasTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.GasTokenGatewayAddr))
}
if common.HexToAddress(cfg.WrappedTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L1FetcherLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
cfg: cfg,
client: client,
addressList: addressList,
gatewayList: gatewayList,
parser: NewL1EventParser(cfg, client),
}
reg := prometheus.DefaultRegisterer
f.l1FetcherLogicFetchedTotal = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "L1_fetcher_logic_fetched_total",
Help: "The total number of events or failed txs fetched in L1 fetcher logic.",
}, []string{"type"})
return f
}
func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
blocks, err := utils.GetBlocksInRange(ctx, f.client, from, to)
if err != nil {
log.Error("failed to get L1 blocks in range", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
for _, block := range blocks {
if block.ParentHash() != lastBlockHash {
log.Warn("L1 reorg detected", "reorg height", block.NumberU64()-1, "expected hash", block.ParentHash().String(), "local hash", lastBlockHash.String())
var resyncHeight uint64
if block.NumberU64() > L1ReorgSafeDepth+1 {
resyncHeight = block.NumberU64() - L1ReorgSafeDepth - 1
}
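// Worked example: with L1ReorgSafeDepth = 64, a reorg first detected at block 1_000_000
// resyncs from 1_000_000 - 64 - 1 = 999_935; for blocks at height 65 or below,
// resyncHeight stays 0 and the fetcher restarts from genesis.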
header, err := f.client.HeaderByNumber(ctx, new(big.Int).SetUint64(resyncHeight))
if err != nil {
log.Error("failed to get L1 header by number", "block number", resyncHeight, "err", err)
return false, 0, common.Hash{}, nil, err
}
return true, resyncHeight, header.Hash(), nil, nil
}
lastBlockHash = block.Hash()
}
return false, 0, lastBlockHash, blocks, nil
}
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
var l1RevertedTxs []*orm.CrossMessage
blockTimestampsMap := make(map[uint64]uint64)
for i := from; i <= to; i++ {
block := blocks[i-from]
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
// Gateways: L1 deposit.
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
if !isTransactionToGateway(tx, f.gatewayList) {
continue
}
var receipt *types.Receipt
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, receiptErr
}
// Skip transactions that did not fail.
if receipt.Status != types.ReceiptStatusFailed {
continue
}
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, senderErr := signer.Sender(tx)
if senderErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
return nil, nil, senderErr
}
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
L1TxHash: tx.Hash().String(),
MessageType: int(btypes.MessageTypeL1SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L1BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
})
}
}
return blockTimestampsMap, l1RevertedTxs, nil
}
func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
query := ethereum.FilterQuery{
FromBlock: new(big.Int).SetUint64(from), // inclusive
ToBlock: new(big.Int).SetUint64(to), // inclusive
Addresses: f.addressList,
Topics: make([][]common.Hash, 1),
}
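// In ethereum.FilterQuery, the hashes inside Topics[0] are OR-matched against the first
// topic (the event signature), so a single filter call covers all watched event types.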
query.Topics[0] = make([]common.Hash, 16)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1DepositERC721Sig
query.Topics[0][3] = backendabi.L1DepositERC1155Sig
query.Topics[0][4] = backendabi.L1SentMessageEventSig
query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L1CommitBatchEventSig
query.Topics[0][8] = backendabi.L1RevertBatchEventSig
query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
query.Topics[0][13] = backendabi.L1ResetDequeuedTransactionEventSig
query.Topics[0][14] = backendabi.L1BridgeBatchDepositSig
query.Topics[0][15] = backendabi.L1DepositWrappedTokenSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
log.Error("failed to filter L1 event logs", "from", from, "to", to, "err", err)
return nil, err
}
return eventLogs, nil
}
// L1Fetcher fetches and parses L1 events, batch events, message queue events, and reverted txs in the block range [from, to], and detects reorgs.
func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, *L1FilterResult, error) {
log.Info("fetch and save L1 events", "from", from, "to", to)
isReorg, reorgHeight, blockHash, blocks, getErr := f.getBlocksAndDetectReorg(ctx, from, to, lastBlockHash)
if getErr != nil {
log.Error("L1Fetcher getBlocksAndDetectReorg failed", "from", from, "to", to, "error", getErr)
return false, 0, common.Hash{}, nil, getErr
}
if isReorg {
return isReorg, reorgHeight, blockHash, nil, nil
}
blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
if err != nil {
log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
eventLogs, err := f.l1FetcherLogs(ctx, from, to)
if err != nil {
log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
l1DepositMessages, l1RelayedMessages, l1BridgeBatchDepositMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
if err != nil {
log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
l1MessageQueueEvents, err := f.parser.ParseL1MessageQueueEventLogs(eventLogs, l1DepositMessages)
if err != nil {
log.Error("failed to parse L1 message queue event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
res := L1FilterResult{
DepositMessages: l1DepositMessages,
RelayedMessages: l1RelayedMessages,
BatchEvents: l1BatchEvents,
MessageQueueEvents: l1MessageQueueEvents,
RevertedTxs: l1RevertedTxs,
BridgeBatchDepositEvents: l1BridgeBatchDepositMessages,
}
f.updateMetrics(res)
return false, 0, blockHash, &res, nil
}
func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_gateway_router_transaction").Add(float64(len(res.RevertedTxs)))
for _, depositMessage := range res.DepositMessages {
switch btypes.TokenType(depositMessage.TokenType) {
case btypes.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_eth").Add(1)
case btypes.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc20").Add(1)
case btypes.TokenTypeERC721:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc721").Add(1)
case btypes.TokenTypeERC1155:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch btypes.TxStatusType(relayedMessage.TxStatus) {
case btypes.TxStatusTypeRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_relayed_message").Add(1)
case btypes.TxStatusTypeFailedRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_relayed_message").Add(1)
}
// Reverted L1 relayed-message transactions are not tracked yet:
// 1. it would require parsing the tx calldata;
// 2. internal transactions are hard to track.
}
for _, batchEvent := range res.BatchEvents {
switch btypes.BatchStatusType(batchEvent.BatchStatus) {
case btypes.BatchStatusTypeCommitted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_commit_batch_event").Add(1)
case btypes.BatchStatusTypeReverted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_revert_batch_event").Add(1)
case btypes.BatchStatusTypeFinalized:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_finalize_batch_event").Add(1)
}
}
for _, messageQueueEvent := range res.MessageQueueEvents {
switch messageQueueEvent.EventType {
case btypes.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_replay_message_or_enforced_transaction").Add(1)
case btypes.MessageQueueEventTypeDequeueTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
case btypes.MessageQueueEventTypeDropTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
// One ResetDequeuedTransaction event can reset multiple skipped messages;
// this metric counts the number of events, not the number of skipped messages.
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_reset_skipped_messages").Add(1)
}
}
for _, bridgeBatchDepositEvent := range res.BridgeBatchDepositEvents {
switch btypes.TokenType(bridgeBatchDepositEvent.TokenType) {
case btypes.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_eth").Add(1)
case btypes.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_erc20").Add(1)
}
}
}

View File

@@ -1,247 +0,0 @@
package logic
import (
"context"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// L2EventParser the L2 event parser
type L2EventParser struct {
cfg *config.FetcherConfig
client *ethclient.Client
}
// NewL2EventParser creates the L2 event parser
func NewL2EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L2EventParser {
return &L2EventParser{
cfg: cfg,
client: client,
}
}
// ParseL2EventLogs parses L2 watched events
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
l2WithdrawMessages, l2RelayedMessages, err := e.ParseL2SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
l2BridgeBatchDepositMessages, err := e.ParseL2BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
return l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, nil
}
// ParseL2BridgeBatchDepositCrossChainEventLogs parses L2 watched bridge batch deposit events
func (e *L2EventParser) ParseL2BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
var l2BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2BridgeBatchDistributeSig:
event := backendabi.L2BatchBridgeGatewayBatchDistribute{}
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "BatchDistribute", vlog)
if err != nil {
log.Error("Failed to unpack BatchDistribute event", "err", err)
return nil, err
}
var tokenType btypes.TokenType
if event.L1Token == common.HexToAddress("0") {
tokenType = btypes.TokenTypeETH
} else {
tokenType = btypes.TokenTypeERC20
}
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
TokenType: int(tokenType),
BatchIndex: event.BatchIndex.Uint64(),
L2TokenAddress: event.L2Token.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDistribute),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
})
case backendabi.L2BridgeBatchDistributeFailedSig:
event := backendabi.L2BatchBridgeGatewayDistributeFailed{}
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "DistributeFailed", vlog)
if err != nil {
log.Error("Failed to unpack DistributeFailed event", "err", err)
return nil, err
}
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
BatchIndex: event.BatchIndex.Uint64(),
L2TokenAddress: event.L2Token.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDistributeFailed),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
Sender: event.Receiver.String(),
})
}
}
return l2BridgeBatchDepositEvents, nil
}
// ParseL2SingleCrossChainEventLogs parses L2 watched events
func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l2WithdrawMessages []*orm.CrossMessage
var l2RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
event := backendabi.ETHMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawETH event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC20 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
case backendabi.L2BatchWithdrawERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Error("Failed to unpack BatchWithdrawERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2BatchWithdrawERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Error("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
case backendabi.L2SentMessageEventSig:
event := backendabi.L2SentMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Error("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
from, err := getRealFromAddress(ctx, event.Sender, event.Message, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
if err != nil {
log.Error("Failed to get real 'from' address", "err", err)
return nil, nil, err
}
l2WithdrawMessages = append(l2WithdrawMessages, &orm.CrossMessage{
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
Sender: from,
Receiver: event.Target.String(),
TokenType: int(btypes.TokenTypeETH),
L2TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageFrom: event.Sender.String(),
MessageTo: event.Target.String(),
MessageValue: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageData: hexutil.Encode(event.Message),
MessageType: int(btypes.MessageTypeL2SentMessage),
TxStatus: int(btypes.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L2BlockNumber: vlog.BlockNumber,
})
case backendabi.L2RelayedMessageEventSig:
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Error("Failed to unpack RelayedMessage event", "err", err)
return nil, nil, err
}
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeRelayed),
MessageType: int(btypes.MessageTypeL1SentMessage),
})
case backendabi.L2FailedRelayedMessageEventSig:
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "FailedRelayedMessage", vlog)
if err != nil {
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
return nil, nil, err
}
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
MessageType: int(btypes.MessageTypeL1SentMessage),
})
}
}
return l2WithdrawMessages, l2RelayedMessages, nil
}

View File

@@ -1,337 +0,0 @@
package logic
import (
"context"
"math/big"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// L2ReorgSafeDepth represents the number of block confirmations considered safe against L2 chain reorganizations.
// Reorganizations at this depth under normal cases are extremely unlikely.
const L2ReorgSafeDepth = 256
// L2FilterResult the L2 filter result
type L2FilterResult struct {
WithdrawMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
BridgeBatchDepositMessage []*orm.BridgeBatchDepositEvent
}
// L2FetcherLogic the L2 fetcher logic
type L2FetcherLogic struct {
cfg *config.FetcherConfig
client *ethclient.Client
addressList []common.Address
gatewayList []common.Address
parser *L2EventParser
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
l2FetcherLogicFetchedTotal *prometheus.CounterVec
}
// NewL2FetcherLogic creates the L2 fetcher logic
func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
}
gatewayList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
}
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L2FetcherLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
cfg: cfg,
client: client,
addressList: addressList,
gatewayList: gatewayList,
parser: NewL2EventParser(cfg, client),
}
reg := prometheus.DefaultRegisterer
f.l2FetcherLogicFetchedTotal = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "L2_fetcher_logic_fetched_total",
Help: "The total number of events or failed txs fetched in L2 fetcher logic.",
}, []string{"type"})
return f
}
func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
blocks, err := utils.GetBlocksInRange(ctx, f.client, from, to)
if err != nil {
log.Error("failed to get L2 blocks in range", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
for _, block := range blocks {
if block.ParentHash() != lastBlockHash {
log.Warn("L2 reorg detected", "reorg height", block.NumberU64()-1, "expected hash", block.ParentHash().String(), "local hash", lastBlockHash.String())
var resyncHeight uint64
if block.NumberU64() > L2ReorgSafeDepth+1 {
resyncHeight = block.NumberU64() - L2ReorgSafeDepth - 1
}
header, err := f.client.HeaderByNumber(ctx, new(big.Int).SetUint64(resyncHeight))
if err != nil {
log.Error("failed to get L2 header by number", "block number", resyncHeight, "err", err)
return false, 0, common.Hash{}, nil, err
}
return true, resyncHeight, header.Hash(), nil, nil
}
lastBlockHash = block.Hash()
}
return false, 0, lastBlockHash, blocks, nil
}
func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, []*orm.CrossMessage, error) {
var l2RevertedUserTxs []*orm.CrossMessage
var l2RevertedRelayedMessageTxs []*orm.CrossMessage
blockTimestampsMap := make(map[uint64]uint64)
for i := from; i <= to; i++ {
block := blocks[i-from]
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
if tx.IsL1MessageTx() {
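// L1 message txs on L2 are relayed deposits; a failed receipt here means the relay tx reverted.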
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, nil, receiptErr
}
// Check whether the transaction failed.
if receipt.Status == types.ReceiptStatusFailed {
l2RevertedRelayedMessageTxs = append(l2RevertedRelayedMessageTxs, &orm.CrossMessage{
MessageHash: common.BytesToHash(crypto.Keccak256(tx.AsL1MessageTx().Data)).String(),
L2TxHash: tx.Hash().String(),
TxStatus: int(btypes.TxStatusTypeRelayTxReverted),
L2BlockNumber: receipt.BlockNumber.Uint64(),
MessageType: int(btypes.MessageTypeL1SentMessage),
})
}
continue
}
// Gateways: L2 withdrawal.
if !isTransactionToGateway(tx, f.gatewayList) {
continue
}
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, nil, receiptErr
}
// Check whether the transaction failed.
if receipt.Status == types.ReceiptStatusFailed {
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, signerErr := signer.Sender(tx)
if signerErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
return nil, nil, nil, signerErr
}
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
L2TxHash: tx.Hash().String(),
MessageType: int(btypes.MessageTypeL2SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L2BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
})
}
}
}
return blockTimestampsMap, l2RevertedUserTxs, l2RevertedRelayedMessageTxs, nil
}
func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
query := ethereum.FilterQuery{
FromBlock: new(big.Int).SetUint64(from), // inclusive
ToBlock: new(big.Int).SetUint64(to), // inclusive
Addresses: f.addressList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 9)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2WithdrawERC721Sig
query.Topics[0][3] = backendabi.L2WithdrawERC1155Sig
query.Topics[0][4] = backendabi.L2SentMessageEventSig
query.Topics[0][5] = backendabi.L2RelayedMessageEventSig
query.Topics[0][6] = backendabi.L2FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L2BridgeBatchDistributeSig
query.Topics[0][8] = backendabi.L2BridgeBatchDistributeFailedSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
log.Error("Failed to filter L2 event logs", "from", from, "to", to, "err", err)
return nil, err
}
return eventLogs, nil
}
// L2Fetcher fetches and parses L2 events and reverted txs in the block range [from, to], and detects reorgs.
func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, *L2FilterResult, error) {
log.Info("fetch and save L2 events", "from", from, "to", to)
isReorg, reorgHeight, blockHash, blocks, getErr := f.getBlocksAndDetectReorg(ctx, from, to, lastBlockHash)
if getErr != nil {
log.Error("L2Fetcher getBlocksAndDetectReorg failed", "from", from, "to", to, "error", getErr)
return false, 0, common.Hash{}, nil, getErr
}
if isReorg {
return isReorg, reorgHeight, blockHash, nil, nil
}
blockTimestampsMap, revertedUserTxs, revertedRelayMsgs, routerErr := f.getRevertedTxs(ctx, from, to, blocks)
if routerErr != nil {
log.Error("L2Fetcher getRevertedTxs failed", "from", from, "to", to, "error", routerErr)
return false, 0, common.Hash{}, nil, routerErr
}
eventLogs, err := f.l2FetcherLogs(ctx, from, to)
if err != nil {
log.Error("L2Fetcher l2FetcherLogs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
res := L2FilterResult{
WithdrawMessages: l2WithdrawMessages,
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
OtherRevertedTxs: revertedUserTxs,
BridgeBatchDepositMessage: l2BridgeBatchDepositMessages,
}
f.updateMetrics(res)
return false, 0, blockHash, &res, nil
}
func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_gateway_router_transaction").Add(float64(len(res.OtherRevertedTxs)))
for _, withdrawMessage := range res.WithdrawMessages {
switch btypes.TokenType(withdrawMessage.TokenType) {
case btypes.TokenTypeETH:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_eth").Add(1)
case btypes.TokenTypeERC20:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc20").Add(1)
case btypes.TokenTypeERC721:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc721").Add(1)
case btypes.TokenTypeERC1155:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch btypes.TxStatusType(relayedMessage.TxStatus) {
case btypes.TxStatusTypeRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_relayed_message").Add(1)
case btypes.TxStatusTypeFailedRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_relayed_message").Add(1)
case btypes.TxStatusTypeRelayTxReverted:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_reverted_relayed_message_transaction").Add(1)
}
}
for _, bridgeBatchDepositMessage := range res.BridgeBatchDepositMessage {
switch btypes.TxStatusType(bridgeBatchDepositMessage.TxStatus) {
case btypes.TxStatusBridgeBatchDistribute:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_message").Add(1)
case btypes.TxStatusBridgeBatchDistributeFailed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_failed_message").Add(1)
}
}
}
func isTransactionToGateway(tx *types.Transaction, gatewayList []common.Address) bool {
if tx.To() == nil {
return false
}
for _, gateway := range gatewayList {
if *tx.To() == gateway {
return true
}
}
return false
}

View File

@@ -1,145 +0,0 @@
package orm
import (
"context"
"fmt"
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
btypes "scroll-tech/bridge-history-api/internal/types"
)
// BatchEvent represents a batch event.
type BatchEvent struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id;primary_key"`
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
BatchStatus int `json:"batch_status" gorm:"column:batch_status"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
BatchHash string `json:"batch_hash" gorm:"column:batch_hash"`
StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
UpdateStatus int `json:"update_status" gorm:"column:update_status"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the BatchEvent model.
func (*BatchEvent) TableName() string {
return "batch_event_v2"
}
// NewBatchEvent returns a new instance of BatchEvent.
func NewBatchEvent(db *gorm.DB) *BatchEvent {
return &BatchEvent{db: db}
}
// GetBatchEventSyncedHeightInDB returns the maximum l1_block_number from the batch_event_v2 table.
func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64, error) {
var batch BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Order("l1_block_number desc")
if err := db.First(&batch).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
}
return batch.L1BlockNumber, nil
}
// GetLastUpdatedFinalizedBlockHeight returns the last updated finalized block height in db.
func (c *BatchEvent) GetLastUpdatedFinalizedBlockHeight(ctx context.Context) (uint64, error) {
var batch BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUpdated)
db = db.Order("batch_index desc")
if err := db.First(&batch).Error; err != nil {
if err == gorm.ErrRecordNotFound {
// No finalized batch found, return genesis batch's end block number.
return 0, nil
}
return 0, fmt.Errorf("failed to get last updated finalized block height, error: %w", err)
}
return batch.EndBlockNumber, nil
}
// GetUnupdatedFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
var batches []*BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("end_block_number <= ?", blockHeight)
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
db = db.Order("batch_index asc")
if err := db.Find(&batches).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get unupdated finalized batches >= block height, error: %w", err)
}
return batches, nil
}
// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent) error {
for _, l1BatchEvent := range l1BatchEvents {
db := c.db
db = db.WithContext(ctx)
db = db.Model(&BatchEvent{})
updateFields := make(map[string]interface{})
switch btypes.BatchStatusType(l1BatchEvent.BatchStatus) {
case btypes.BatchStatusTypeCommitted:
// Use the clause to either insert or ignore on conflict
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "batch_hash"}},
DoNothing: true,
})
if err := db.Create(l1BatchEvent).Error; err != nil {
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
}
case btypes.BatchStatusTypeFinalized:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
updateFields["l1_block_number"] = l1BatchEvent.L1BlockNumber
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
case btypes.BatchStatusTypeReverted:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeReverted
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
// Soft delete the batch event.
if err := db.Delete(l1BatchEvent).Error; err != nil {
return fmt.Errorf("failed to soft delete batch event, error: %w", err)
}
}
}
return nil
}
// UpdateBatchEventStatus updates the UpdateStatusType of a BatchEvent given its batch index.
func (c *BatchEvent) UpdateBatchEventStatus(ctx context.Context, batchIndex uint64) error {
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("batch_index = ?", batchIndex)
updateFields := map[string]interface{}{
"update_status": btypes.UpdateStatusTypeUpdated,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event status, batchIndex: %d, error: %w", batchIndex, err)
}
return nil
}
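A minimal usage sketch of this ORM, assuming the package import path scroll-tech/bridge-history-api/internal/orm and a placeholder Postgres DSN; the real service wires the database connection through its own config.

package main

import (
    "context"
    "log"

    "gorm.io/driver/postgres"
    "gorm.io/gorm"

    "scroll-tech/bridge-history-api/internal/orm" // assumed import path
)

func main() {
    // Placeholder DSN for illustration only.
    db, err := gorm.Open(postgres.Open("postgres://user:pass@localhost:5432/bridge?sslmode=disable"), &gorm.Config{})
    if err != nil {
        log.Fatalf("failed to open db: %v", err)
    }

    batchEventORM := orm.NewBatchEvent(db)

    // Resume L1 batch-event syncing from the highest block already stored.
    height, err := batchEventORM.GetBatchEventSyncedHeightInDB(context.Background())
    if err != nil {
        log.Fatalf("failed to get synced height: %v", err)
    }
    log.Printf("batch events synced up to L1 block %d", height)
}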

View File

@@ -1,163 +0,0 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/bridge-history-api/internal/types"
)
// BridgeBatchDepositEvent represents the bridge batch deposit event.
type BridgeBatchDepositEvent struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id;primary_key"`
TokenType int `json:"token_type" gorm:"column:token_type"`
Sender string `json:"sender" gorm:"column:sender"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
TokenAmount string `json:"token_amount" gorm:"column:token_amount"`
Fee string `json:"fee" gorm:"column:fee"`
L1TokenAddress string `json:"l1_token_address" gorm:"column:l1_token_address"`
L2TokenAddress string `json:"l2_token_address" gorm:"column:l2_token_address"`
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
L2BlockNumber uint64 `json:"l2_block_number" gorm:"column:l2_block_number"`
L1TxHash string `json:"l1_tx_hash" gorm:"column:l1_tx_hash"`
L1LogIndex uint `json:"l1_log_index" gorm:"column:l1_log_index"`
L2TxHash string `json:"l2_tx_hash" gorm:"column:l2_tx_hash"`
TxStatus int `json:"tx_status" gorm:"column:tx_status"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"column:block_timestamp"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the BridgeBatchDepositEvent model.
func (*BridgeBatchDepositEvent) TableName() string {
return "bridge_batch_deposit_event_v2"
}
// NewBridgeBatchDepositEvent returns a new instance of BridgeBatchDepositEvent.
func NewBridgeBatchDepositEvent(db *gorm.DB) *BridgeBatchDepositEvent {
return &BridgeBatchDepositEvent{db: db}
}
// GetTxsByAddress returns the txs by address
func (c *BridgeBatchDepositEvent) GetTxsByAddress(ctx context.Context, sender string) ([]*BridgeBatchDepositEvent, error) {
var messages []*BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get all txs by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// GetMessagesByTxHashes retrieves all BridgeBatchDepositEvent from the database that match the provided transaction hashes.
func (c *BridgeBatchDepositEvent) GetMessagesByTxHashes(ctx context.Context, txHashes []string) ([]*BridgeBatchDepositEvent, error) {
var messages []*BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("l1_tx_hash in (?) or l2_tx_hash in (?)", txHashes, txHashes)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to GetMessagesByTxHashes by tx hashes, tx hashes: %v, error: %w", txHashes, err)
}
return messages, nil
}
// GetMessageL1SyncedHeightInDB returns the latest L1 bridge batch deposit message height from the database
func (c *BridgeBatchDepositEvent) GetMessageL1SyncedHeightInDB(ctx context.Context) (uint64, error) {
var message BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Order("l1_block_number desc")
err := db.First(&message).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to get l1 latest processed height, error: %w", err)
}
return message.L1BlockNumber, nil
}
// GetMessageL2SyncedHeightInDB returns the latest L2 bridge batch deposit message height from the database
func (c *BridgeBatchDepositEvent) GetMessageL2SyncedHeightInDB(ctx context.Context) (uint64, error) {
var message BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Order("l2_block_number desc")
err := db.First(&message).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to get l2 latest processed height, error: %w", err)
}
return message.L2BlockNumber, nil
}
// InsertOrUpdateL1BridgeBatchDepositEvent inserts or updates a new L1 BridgeBatchDepositEvent
func (c *BridgeBatchDepositEvent) InsertOrUpdateL1BridgeBatchDepositEvent(ctx context.Context, l1BatchDepositEvents []*BridgeBatchDepositEvent) error {
if len(l1BatchDepositEvents) == 0 {
return nil
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "l1_tx_hash"}, {Name: "l1_log_index"}},
DoUpdates: clause.AssignmentColumns([]string{"token_amount", "fee", "l1_block_number", "l1_token_address", "tx_status", "block_timestamp"}),
})
if err := db.Create(l1BatchDepositEvents).Error; err != nil {
return fmt.Errorf("failed to insert message, error: %w", err)
}
return nil
}
// UpdateBatchEventStatus updates the tx_status of BridgeBatchDepositEvent given batch index
func (c *BridgeBatchDepositEvent) UpdateBatchEventStatus(ctx context.Context, distributeMessage *BridgeBatchDepositEvent) error {
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("batch_index = ?", distributeMessage.BatchIndex)
db = db.Where("token_type = ?", distributeMessage.TokenType)
updateFields := map[string]interface{}{
"l2_token_address": distributeMessage.L2TokenAddress,
"l2_block_number": distributeMessage.L2BlockNumber,
"l2_tx_hash": distributeMessage.L2TxHash,
"tx_status": types.TxStatusBridgeBatchDistribute,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to UpdateBatchEventStatus, batchIndex: %d, error: %w", distributeMessage.BatchIndex, err)
}
return nil
}
// UpdateDistributeFailedStatus updates the tx_status of BridgeBatchDepositEvent given batch index and senders
func (c *BridgeBatchDepositEvent) UpdateDistributeFailedStatus(ctx context.Context, batchIndex uint64, senders []string) error {
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("batch_index = ?", batchIndex)
db = db.Where("sender in (?)", senders)
updateFields := map[string]interface{}{
"tx_status": types.TxStatusBridgeBatchDistributeFailed,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to UpdateDistributeFailedStatus, batchIndex: %d, senders:%v, error: %w", batchIndex, senders, err)
}
return nil
}

View File

@@ -1,500 +0,0 @@
package orm
import (
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/common"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/bridge-history-api/internal/types"
btypes "scroll-tech/bridge-history-api/internal/types"
)
// MessageQueueEvent struct represents the details of a batch event.
type MessageQueueEvent struct {
EventType btypes.MessageQueueEventType
QueueIndex uint64
// Track replay tx hash and refund tx hash.
TxHash common.Hash
// MessageHash is only set for QueueTransaction events emitted by replayMessage, to track which message is replayed.
MessageHash common.Hash
}
// CrossMessage represents a cross message.
type CrossMessage struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id;primary_key"`
MessageType int `json:"message_type" gorm:"column:message_type"`
RollupStatus int `json:"rollup_status" gorm:"column:rollup_status"`
TxStatus int `json:"tx_status" gorm:"column:tx_status"`
TokenType int `json:"token_type" gorm:"column:token_type"`
Sender string `json:"sender" gorm:"column:sender"`
Receiver string `json:"receiver" gorm:"column:receiver"`
MessageHash string `json:"message_hash" gorm:"column:message_hash"`
L1TxHash string `json:"l1_tx_hash" gorm:"column:l1_tx_hash"` // initial tx hash, if MessageType is MessageTypeL1SentMessage.
L1ReplayTxHash string `json:"l1_replay_tx_hash" gorm:"column:l1_replay_tx_hash"`
L1RefundTxHash string `json:"l1_refund_tx_hash" gorm:"column:l1_refund_tx_hash"`
L2TxHash string `json:"l2_tx_hash" gorm:"column:l2_tx_hash"` // initial tx hash, if MessageType is MessageTypeL2SentMessage.
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
L2BlockNumber uint64 `json:"l2_block_number" gorm:"column:l2_block_number"`
L1TokenAddress string `json:"l1_token_address" gorm:"column:l1_token_address"`
L2TokenAddress string `json:"l2_token_address" gorm:"column:l2_token_address"`
TokenIDs string `json:"token_ids" gorm:"column:token_ids"`
TokenAmounts string `json:"token_amounts" gorm:"column:token_amounts"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"column:block_timestamp"`
MessageFrom string `json:"message_from" gorm:"column:message_from"`
MessageTo string `json:"message_to" gorm:"column:message_to"`
MessageValue string `json:"message_value" gorm:"column:message_value"`
MessageNonce uint64 `json:"message_nonce" gorm:"column:message_nonce"`
MessageData string `json:"message_data" gorm:"column:message_data"`
MerkleProof []byte `json:"merkle_proof" gorm:"column:merkle_proof"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the CrossMessage model.
func (*CrossMessage) TableName() string {
return "cross_message_v2"
}
// NewCrossMessage returns a new instance of CrossMessage.
func NewCrossMessage(db *gorm.DB) *CrossMessage {
return &CrossMessage{db: db}
}
// GetMessageSyncedHeightInDB returns the latest synced cross message height from the database for a given message type.
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType btypes.MessageType) (uint64, error) {
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", messageType)
switch {
case messageType == btypes.MessageTypeL1SentMessage:
db = db.Order("l1_block_number desc")
case messageType == btypes.MessageTypeL2SentMessage:
db = db.Order("l2_block_number desc")
}
if err := db.First(&message).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
}
switch {
case messageType == btypes.MessageTypeL1SentMessage:
return message.L1BlockNumber, nil
case messageType == btypes.MessageTypeL2SentMessage:
return message.L2BlockNumber, nil
default:
return 0, fmt.Errorf("invalid message type: %v", messageType)
}
}
// GetL2LatestFinalizedWithdrawal returns the latest finalized L2 withdrawal from the database.
func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*CrossMessage, error) {
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
db = db.Order("message_nonce desc")
if err := db.First(&message).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
}
return &message, nil
}
// GetL2WithdrawalsByBlockRange returns the L2 withdrawals by block range from the database.
func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBlock, endBlock uint64) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("l2_block_number >= ?", startBlock)
db = db.Where("l2_block_number <= ?", endBlock)
db = db.Where("tx_status != ?", types.TxStatusTypeSentTxReverted)
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Order("message_nonce asc")
if err := db.Find(&messages).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
}
return messages, nil
}
// GetMessagesByTxHashes retrieves all cross messages from the database that match the provided transaction hashes.
func (c *CrossMessage) GetMessagesByTxHashes(ctx context.Context, txHashes []string) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("l1_tx_hash in (?) or l2_tx_hash in (?)", txHashes, txHashes)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 messages by tx hashes, tx hashes: %v, error: %w", txHashes, err)
}
return messages, nil
}
// GetL2UnclaimedWithdrawalsByAddress retrieves all L2 unclaimed withdrawal messages for a given sender address.
func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// GetL2WithdrawalsByAddress retrieves all L2 withdrawal messages for a given sender address.
func (c *CrossMessage) GetL2WithdrawalsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 withdrawal messages by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// GetTxsByAddress retrieves all txs for a given sender address.
func (c *CrossMessage) GetTxsByAddress(ctx context.Context, sender string) ([]*CrossMessage, error) {
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get all txs by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// UpdateL1MessageQueueEventsInfo updates the information about L1 message queue events in the database.
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent) error {
// update tx statuses.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
txStatusUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeQueueTransaction:
continue
case btypes.MessageQueueEventTypeDequeueTransaction:
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSkipped
case btypes.MessageQueueEventTypeDropTransaction:
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
db = db.Where("tx_status = ?", types.TxStatusTypeSkipped)
// reset skipped messages whose nonce is greater than or equal to the queue index.
db = db.Where("message_nonce >= ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSent
}
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
}
}
// update tx hashes of replay and refund.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeDequeueTransaction, btypes.MessageQueueEventTypeResetDequeuedTransaction:
continue
case btypes.MessageQueueEventTypeQueueTransaction:
// Only replayMessages or enforced txs (whose message hashes would not be found) remain here; sendMessages have been filtered out.
// replayMessage case:
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
//
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
// because in replayMessage, queue index != message nonce.
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
case btypes.MessageQueueEventTypeDropTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
}
if err := db.Updates(txHashUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
}
}
return nil
}
// UpdateBatchStatusOfL2Withdrawals updates batch status of L2 withdrawals.
func (c *CrossMessage) UpdateBatchStatusOfL2Withdrawals(ctx context.Context, startBlockNumber, endBlockNumber, batchIndex uint64) error {
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("l2_block_number >= ?", startBlockNumber)
db = db.Where("l2_block_number <= ?", endBlockNumber)
updateFields := make(map[string]interface{})
updateFields["batch_index"] = batchIndex
updateFields["rollup_status"] = btypes.RollupStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch status of L2 sent messages, start: %v, end: %v, index: %v, error: %w", startBlockNumber, endBlockNumber, batchIndex, err)
}
return nil
}
// UpdateBatchIndexRollupStatusMerkleProofOfL2Messages updates the batch_index, rollup_status, and merkle_proof fields for a list of L2 cross messages.
func (c *CrossMessage) UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx context.Context, messages []*CrossMessage) error {
if len(messages) == 0 {
return nil
}
for _, message := range messages {
updateFields := map[string]interface{}{
"batch_index": message.BatchIndex,
"rollup_status": message.RollupStatus,
"merkle_proof": message.MerkleProof,
}
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_hash = ?", message.MessageHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update L2 message with message_hash %s, error: %w", message.MessageHash, err)
}
}
return nil
}
// InsertOrUpdateL1Messages inserts or updates a list of L1 cross messages into the database.
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage) error {
if len(messages) == 0 {
return nil
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l1_block_number", "l1_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_nonce"}),
})
if err := db.Create(messages).Error; err != nil {
return fmt.Errorf("failed to insert message, error: %w", err)
}
return nil
}
// InsertOrUpdateL2Messages inserts or updates a list of L2 cross messages into the database.
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage) error {
if len(messages) == 0 {
return nil
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l2_block_number", "l2_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_from", "message_to", "message_value", "message_data", "message_nonce"}),
})
if err := db.Create(messages).Error; err != nil {
return fmt.Errorf("failed to insert message, error: %w", err)
}
return nil
}
// InsertFailedL2GatewayTxs inserts a list of transactions that failed to interact with the L2 gateways into the database.
// To resolve unique index conflicts, the L2 tx hash is used as the MessageHash.
// The OnConflict clause is used to prevent inserting same failed transactions multiple times.
func (c *CrossMessage) InsertFailedL2GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
if len(messages) == 0 {
return nil
}
for _, message := range messages {
message.MessageHash = message.L2TxHash
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoNothing: true,
})
if err := db.Create(&messages).Error; err != nil {
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
}
return nil
}
// InsertFailedL1GatewayTxs inserts a list of transactions that failed to interact with the L1 gateways into the database.
// To resolve unique index conflicts, the L1 tx hash is used as the MessageHash.
// The OnConflict clause is used to prevent inserting same failed transactions multiple times.
func (c *CrossMessage) InsertFailedL1GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
if len(messages) == 0 {
return nil
}
for _, message := range messages {
message.MessageHash = message.L1TxHash
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoNothing: true,
})
if err := db.Create(&messages).Error; err != nil {
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
}
return nil
}
// InsertOrUpdateL2RelayedMessagesOfL1Deposits inserts or updates the database with a list of L2 relayed messages related to L1 deposits.
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage) error {
if len(l2RelayedMessages) == 0 {
return nil
}
// Deduplicate messages: for each message_hash, retain the relayed message if any, otherwise the one with the highest block number.
// This is necessary as a single message, like a FailedRelayedMessage or a reverted relayed transaction,
// may be relayed multiple times within certain block ranges, potentially leading to the error:
// "ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time (SQLSTATE 21000)".
// This happens if we attempt to insert multiple records with the same message_hash in a single db.Create operation.
// For example, see these transactions where the same message was relayed twice within certain block ranges:
// Reverted tx 1: https://sepolia.scrollscan.com/tx/0xcd6979277c3bc747445273a5e58ef1e9692fbe101d88cfefbbb69d3aef3193c0
// Reverted tx 2: https://sepolia.scrollscan.com/tx/0x43e28ed7cb71107c18c5d8ebbdb4a1d9cac73e60391d14d41e92985028faa337
// Another example:
// FailedRelayedMessage 1: https://sepolia.scrollscan.com/tx/0xfadb147fb211e5096446c5cac3ae0a8a705d2ece6c47c65135c8874f84638f17
// FailedRelayedMessage 2: https://sepolia.scrollscan.com/tx/0x6cb149b61afd07bf2e17561a59ebebde41e343b6610290c97515b2f862160b42
mergedL2RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l2RelayedMessages {
if existing, found := mergedL2RelayedMessages[message.MessageHash]; found {
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
mergedL2RelayedMessages[message.MessageHash] = message
}
} else {
mergedL2RelayedMessages[message.MessageHash] = message
}
}
uniqueL2RelayedMessages := make([]*CrossMessage, 0, len(mergedL2RelayedMessages))
for _, msg := range mergedL2RelayedMessages {
uniqueL2RelayedMessages = append(uniqueL2RelayedMessages, msg)
}
// Do not update tx status of successfully relayed messages,
// because if a message is handled, the later relayed message tx would be reverted.
// ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L2/L2ScrollMessenger.sol#L102
// e.g.,
// Successfully relayed: https://sepolia.scrollscan.com/tx/0x4eb7cb07ba76956259c0079819a34a146f8a93dd891dc94812e9b3d66b056ec7#eventlog
// Reverted tx 1 (Reason: Message was already successfully executed): https://sepolia.scrollscan.com/tx/0x1973cafa14eb40734df30da7bfd4d9aceb53f8f26e09d96198c16d0e2e4a95fd
// Reverted tx 2 (Reason: Message was already successfully executed): https://sepolia.scrollscan.com/tx/0x02fc3a28684a590aead2482022f56281539085bd3d273ac8dedc1ceccb2bc554
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"message_type", "l2_block_number", "l2_tx_hash", "tx_status"}),
Where: clause.Where{
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
),
},
},
})
if err := db.Create(uniqueL2RelayedMessages).Error; err != nil {
return fmt.Errorf("failed to update L2 reverted relayed message of L1 deposit, error: %w", err)
}
return nil
}
// InsertOrUpdateL1RelayedMessagesOfL2Withdrawals inserts or updates the database with a list of L1 relayed messages related to L2 withdrawals.
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage) error {
if len(l1RelayedMessages) == 0 {
return nil
}
// Deduplicate messages: for each message_hash, retain the relayed message if any, otherwise the one with the highest block number.
// This is necessary as a single message, like a FailedRelayedMessage or a reverted relayed transaction,
// may be relayed multiple times within certain block ranges, potentially leading to the error:
// "ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time (SQLSTATE 21000)".
// This happens if we attempt to insert multiple records with the same message_hash in a single db.Create operation.
// For example, see these transactions where the same message was relayed twice within certain block ranges:
// FailedRelayedMessage 1: https://sepolia.etherscan.io/tx/0x28b3212cda6ca0f3790f362a780257bbe2b37417ccf75a4eca6c3a08294c8f1b#eventlog
// FailedRelayedMessage 2: https://sepolia.etherscan.io/tx/0xc8a8254825dd2cab5caef58cfd8d88c077ceadadc78f2340214a86cf8ab88543#eventlog
// Another example (relayed success, then relayed again):
// Relay Message, and success: https://sepolia.etherscan.io/tx/0xcfdf2f5446719e3e123a8aa06e4d6b3809c3850a13adf875755c8b1e423aa448#eventlog
// Relay Message again, and reverted: https://sepolia.etherscan.io/tx/0xb1fcae7546f3de4cfd0b4d679f4075adb4eb69578b12e2b5673f5f24b1836578
mergedL1RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l1RelayedMessages {
if existing, found := mergedL1RelayedMessages[message.MessageHash]; found {
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
mergedL1RelayedMessages[message.MessageHash] = message
}
} else {
mergedL1RelayedMessages[message.MessageHash] = message
}
}
uniqueL1RelayedMessages := make([]*CrossMessage, 0, len(mergedL1RelayedMessages))
for _, msg := range mergedL1RelayedMessages {
uniqueL1RelayedMessages = append(uniqueL1RelayedMessages, msg)
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"message_type", "l1_block_number", "l1_tx_hash", "tx_status"}),
Where: clause.Where{
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
),
},
},
})
if err := db.Create(uniqueL1RelayedMessages).Error; err != nil {
return fmt.Errorf("failed to update L1 relayed message of L2 withdrawal, error: %w", err)
}
return nil
}
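The deduplication step above can be read in isolation: for each message_hash keep at most one record, preferring a relayed one and otherwise the one with the higher block number. A simplified sketch with a hypothetical record type (the field names here are illustrative, not the ORM model):

// dedupByMessageHash mirrors the merge loop in
// InsertOrUpdateL1RelayedMessagesOfL2Withdrawals: one record per message hash,
// preferring a relayed record, otherwise the highest block number.
type relayedRecord struct {
    MessageHash string
    BlockNumber uint64
    Relayed     bool
}

func dedupByMessageHash(records []relayedRecord) []relayedRecord {
    merged := make(map[string]relayedRecord)
    for _, r := range records {
        existing, found := merged[r.MessageHash]
        if !found || r.Relayed || r.BlockNumber > existing.BlockNumber {
            merged[r.MessageHash] = r
        }
    }
    out := make([]relayedRecord, 0, len(merged))
    for _, r := range merged {
        out = append(out, r)
    }
    return out
}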

View File

@@ -1,61 +0,0 @@
package migrate
import (
"database/sql"
"embed"
"os"
"strconv"
"github.com/pressly/goose/v3"
)
//go:embed migrations/*.sql
var embedMigrations embed.FS
// MigrationsDir migration dir
const MigrationsDir string = "migrations"
func init() {
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("bridge_historyv2_migrations")
verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
goose.SetVerbose(verbose)
}
// Migrate migrates the db.
func Migrate(db *sql.DB) error {
return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}
// Rollback rolls back to the given version, or rolls back one migration if version is nil.
func Rollback(db *sql.DB, version *int64) error {
if version != nil {
return goose.DownTo(db, MigrationsDir, *version)
}
return goose.Down(db, MigrationsDir)
}
// ResetDB cleans and migrates the db.
func ResetDB(db *sql.DB) error {
if err := Rollback(db, new(int64)); err != nil {
return err
}
return Migrate(db)
}
// Current gets the current migration version.
func Current(db *sql.DB) (int64, error) {
return goose.GetDBVersion(db)
}
// Status checks whether the migration status is normal.
func Status(db *sql.DB) error {
return goose.Version(db, MigrationsDir)
}
// Create creates a new migration file.
func Create(db *sql.DB, name, migrationType string) error {
return goose.Create(db, MigrationsDir, name, migrationType)
}
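A minimal sketch of wiring these helpers up with database/sql, assuming the lib/pq driver and this package's import path; error handling is abbreviated and the DSN is a placeholder.

package main

import (
    "database/sql"
    "log"

    _ "github.com/lib/pq" // assumed Postgres driver

    "scroll-tech/bridge-history-api/internal/orm/migrate" // assumed import path
)

func main() {
    db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/bridge?sslmode=disable")
    if err != nil {
        log.Fatalf("failed to open db: %v", err)
    }
    defer db.Close()

    if err := migrate.Migrate(db); err != nil {
        log.Fatalf("migration failed: %v", err)
    }

    version, err := migrate.Current(db)
    if err != nil {
        log.Fatalf("failed to read migration version: %v", err)
    }
    log.Printf("db is at migration version %d", version)
}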

View File

@@ -1,57 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE cross_message_v2
(
id BIGSERIAL PRIMARY KEY,
message_type SMALLINT NOT NULL,
tx_status SMALLINT NOT NULL,
rollup_status SMALLINT NOT NULL,
token_type SMALLINT NOT NULL,
sender VARCHAR NOT NULL,
receiver VARCHAR NOT NULL,
message_hash VARCHAR DEFAULT NULL, -- NULL for failed txs
l1_tx_hash VARCHAR DEFAULT NULL,
l1_replay_tx_hash VARCHAR DEFAULT NULL,
l1_refund_tx_hash VARCHAR DEFAULT NULL,
l2_tx_hash VARCHAR DEFAULT NULL,
l1_block_number BIGINT DEFAULT NULL,
l2_block_number BIGINT DEFAULT NULL,
l1_token_address VARCHAR DEFAULT NULL,
l2_token_address VARCHAR DEFAULT NULL,
token_ids VARCHAR DEFAULT NULL,
token_amounts VARCHAR NOT NULL,
block_timestamp BIGINT NOT NULL, -- timestamp to sort L1 Deposit & L2 Withdraw events altogether
--- claim info
message_from VARCHAR DEFAULT NULL,
message_to VARCHAR DEFAULT NULL,
message_value VARCHAR DEFAULT NULL,
message_nonce BIGINT DEFAULT NULL,
message_data VARCHAR DEFAULT NULL,
merkle_proof BYTEA DEFAULT NULL,
batch_index BIGINT DEFAULT NULL,
-- metadata
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_cm_message_hash ON cross_message_v2 (message_hash);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_l1_block_number ON cross_message_v2 (message_type, l1_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_l2_block_number ON cross_message_v2 (message_type, l2_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_rollup_status_message_nonce ON cross_message_v2 (message_type, rollup_status, message_nonce DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_message_nonce_tx_status_l2_block_number ON cross_message_v2 (message_type, message_nonce, tx_status, l2_block_number);
CREATE INDEX IF NOT EXISTS idx_cm_l1_tx_hash ON cross_message_v2 (l1_tx_hash);
CREATE INDEX IF NOT EXISTS idx_cm_l2_tx_hash ON cross_message_v2 (l2_tx_hash);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_tx_status_sender_block_timestamp ON cross_message_v2 (message_type, tx_status, sender, block_timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_cm_message_type_sender_block_timestamp ON cross_message_v2 (message_type, sender, block_timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_cm_sender_block_timestamp ON cross_message_v2 (sender, block_timestamp DESC);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS cross_message_v2;
-- +goose StatementEnd

View File

@@ -1,29 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE batch_event_v2
(
id BIGSERIAL PRIMARY KEY,
l1_block_number BIGINT NOT NULL,
batch_status SMALLINT NOT NULL,
batch_index BIGINT NOT NULL,
batch_hash VARCHAR NOT NULL,
start_block_number BIGINT NOT NULL,
end_block_number BIGINT NOT NULL,
update_status SMALLINT NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS unique_idx_be_batch_hash ON batch_event_v2 (batch_hash);
CREATE INDEX IF NOT EXISTS idx_be_l1_block_number ON batch_event_v2 (l1_block_number);
CREATE INDEX IF NOT EXISTS idx_be_batch_index ON batch_event_v2 (batch_index);
CREATE INDEX IF NOT EXISTS idx_be_batch_index_batch_hash ON batch_event_v2 (batch_index, batch_hash);
CREATE INDEX IF NOT EXISTS idx_be_end_block_number_update_status_batch_status_batch_index ON batch_event_v2 (end_block_number, update_status, batch_status, batch_index);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS batch_event_v2;
-- +goose StatementEnd

View File

@@ -1,38 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE bridge_batch_deposit_event_v2
(
id BIGSERIAL PRIMARY KEY,
token_type SMALLINT NOT NULL,
sender VARCHAR NOT NULL,
batch_index BIGINT DEFAULT NULL,
token_amount VARCHAR NOT NULL,
fee VARCHAR NOT NULL,
l1_token_address VARCHAR DEFAULT NULL,
l2_token_address VARCHAR DEFAULT NULL,
l1_block_number BIGINT DEFAULT NULL,
l2_block_number BIGINT DEFAULT NULL,
l1_tx_hash VARCHAR DEFAULT NULL,
l1_log_index INTEGER DEFAULT NULL,
l2_tx_hash VARCHAR DEFAULT NULL,
tx_status SMALLINT NOT NULL,
block_timestamp BIGINT NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX idx_l1hash_l1logindex ON bridge_batch_deposit_event_v2 (l1_tx_hash, l1_log_index);
CREATE INDEX IF NOT EXISTS idx_bbde_batchidx_sender ON bridge_batch_deposit_event_v2 (batch_index, sender);
CREATE INDEX IF NOT EXISTS idx_bbde_l1_block_number ON bridge_batch_deposit_event_v2 (l1_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l2_block_number ON bridge_batch_deposit_event_v2 (l2_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l1_tx_hash ON bridge_batch_deposit_event_v2 (l1_tx_hash DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l2_tx_hash ON bridge_batch_deposit_event_v2 (l2_tx_hash DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_sender_block_timestamp ON bridge_batch_deposit_event_v2 (sender, block_timestamp DESC);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS bridge_batch_deposit_event_v2;
-- +goose StatementEnd

View File

@@ -1,35 +0,0 @@
package route
import (
"time"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"scroll-tech/common/observability"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/controller/api"
)
// Route routes the APIs
func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
router.Use(cors.New(cors.Config{
AllowOrigins: []string{"*"},
AllowMethods: []string{"GET", "POST"},
AllowHeaders: []string{"Origin", "Content-Type", "Authorization"},
AllowCredentials: true,
MaxAge: 12 * time.Hour,
}))
observability.Use(router, "bridge_history_api", reg)
r := router.Group("api/")
r.GET("/txs", api.TxsByAddressCtl.GetTxsByAddress)
r.GET("/l2/withdrawals", api.L2WithdrawalsByAddressCtl.GetL2WithdrawalsByAddress)
r.GET("/l2/unclaimed/withdrawals", api.L2UnclaimedWithdrawalsByAddressCtl.GetL2UnclaimedWithdrawalsByAddress)
r.POST("/txsbyhashes", api.TxsByHashesCtl.PostQueryTxsByHashes)
}
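How this router might be mounted is sketched below; it assumes the config has already been loaded and the api controllers initialized elsewhere in the service, and the import paths are assumptions rather than confirmed package locations.

package main

import (
    "log"

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/bridge-history-api/internal/config" // assumed import path
    "scroll-tech/bridge-history-api/internal/route"  // assumed import path
)

// startAPIServer mounts the bridge-history routes and starts serving.
// The caller is expected to load conf and initialize the api controllers first.
func startAPIServer(conf *config.Config) error {
    router := gin.Default()
    route.Route(router, conf, prometheus.NewRegistry())
    return router.Run(":8080")
}

func main() {
    // Placeholder config; the real service loads it from its config file.
    if err := startAPIServer(&config.Config{}); err != nil {
        log.Fatalf("api server exited: %v", err)
    }
}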

View File

@@ -1,94 +0,0 @@
package types
// TxStatusType represents the status of a transaction.
type TxStatusType int
// Constants for TxStatusType.
const (
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
// from a later status (e.g., relayed) back to "sent".
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // The message hash is not tracked, so it will not be processed again.
TxStatusTypeRelayed // Terminal status.
// TxStatusTypeFailedRelayed is retryable: this often occurs due to an out-of-gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeFailedRelayed
// TxStatusTypeRelayTxReverted is retryable: this often occurs due to an out-of-gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
// TxStatusBridgeBatchDeposit means the user deposited tokens into the bridge batch deposit contract.
TxStatusBridgeBatchDeposit
// TxStatusBridgeBatchDistribute means the bridge batch deposit contract successfully distributed tokens to the user.
TxStatusBridgeBatchDistribute
// TxStatusBridgeBatchDistributeFailed means the bridge batch deposit contract failed to distribute tokens to the user.
TxStatusBridgeBatchDistributeFailed
)
// TokenType represents the type of token.
type TokenType int
// Constants for TokenType.
const (
TokenTypeUnknown TokenType = iota
TokenTypeETH
TokenTypeERC20
TokenTypeERC721
TokenTypeERC1155
)
// MessageType represents the type of message.
type MessageType int
// Constants for MessageType.
const (
MessageTypeUnknown MessageType = iota
MessageTypeL1SentMessage
MessageTypeL2SentMessage
MessageTypeL1BatchDeposit
)
// RollupStatusType represents the status of a rollup.
type RollupStatusType int
// Constants for RollupStatusType.
const (
RollupStatusTypeUnknown RollupStatusType = iota
RollupStatusTypeFinalized // only batch finalized status is used.
)
// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int
// Constants for MessageQueueEventType.
const (
MessageQueueEventTypeUnknown MessageQueueEventType = iota
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
MessageQueueEventTypeResetDequeuedTransaction
)
// BatchStatusType represents the type of batch status.
type BatchStatusType int
// Constants for BatchStatusType.
const (
BatchStatusTypeUnknown BatchStatusType = iota
BatchStatusTypeCommitted
BatchStatusTypeReverted
BatchStatusTypeFinalized
)
// UpdateStatusType represents whether the batch info has been updated in the message table.
type UpdateStatusType int
// Constants for UpdateStatusType.
const (
UpdateStatusTypeUnupdated UpdateStatusType = iota
UpdateStatusTypeUpdated
)

View File

@@ -1,131 +0,0 @@
package types
import (
"net/http"
"github.com/gin-gonic/gin"
)
const (
// Success indicates that the operation was successful.
Success = 0
// InternalServerError represents a fatal error occurring on the server.
InternalServerError = 500
// ErrParameterInvalidNo represents an error when the parameters are invalid.
ErrParameterInvalidNo = 40001
// ErrGetL2ClaimableWithdrawalsError represents an error when trying to get L2 claimable withdrawal transactions.
ErrGetL2ClaimableWithdrawalsError = 40002
// ErrGetL2WithdrawalsError represents an error when trying to get L2 withdrawal transactions by address.
ErrGetL2WithdrawalsError = 40003
// ErrGetTxsError represents an error when trying to get transactions by address.
ErrGetTxsError = 40004
// ErrGetTxsByHashError represents an error when trying to get transactions by hash list.
ErrGetTxsByHashError = 40005
)
// QueryByAddressRequest is the request parameter of the address API.
type QueryByAddressRequest struct {
Address string `form:"address" binding:"required"`
Page uint64 `form:"page" binding:"required,min=1"`
PageSize uint64 `form:"page_size" binding:"required,min=1,max=100"`
}
// QueryByHashRequest is the request parameter of the hash API.
type QueryByHashRequest struct {
Txs []string `json:"txs" binding:"required,min=1,max=100"`
}
// ResultData contains the returned txs and the total count.
type ResultData struct {
Results []*TxHistoryInfo `json:"results"`
Total uint64 `json:"total"`
}
// Response is the response schema.
type Response struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data interface{} `json:"data"`
}
// CounterpartChainTx is the schema of counterpart chain tx info
type CounterpartChainTx struct {
Hash string `json:"hash"`
BlockNumber uint64 `json:"block_number"`
}
// ClaimInfo is the schema of tx claim info
type ClaimInfo struct {
From string `json:"from"`
To string `json:"to"`
Value string `json:"value"`
Nonce string `json:"nonce"`
Message string `json:"message"`
Proof L2MessageProof `json:"proof"`
Claimable bool `json:"claimable"`
}
// L2MessageProof is the schema of L2 message proof
type L2MessageProof struct {
BatchIndex string `json:"batch_index"`
MerkleProof string `json:"merkle_proof"`
}
// TxHistoryInfo is the schema of tx history info.
type TxHistoryInfo struct {
Hash string `json:"hash"`
ReplayTxHash string `json:"replay_tx_hash"`
RefundTxHash string `json:"refund_tx_hash"`
MessageHash string `json:"message_hash"`
TokenType TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
TokenIDs []string `json:"token_ids"` // only for erc721 and erc1155
TokenAmounts []string `json:"token_amounts"` // for eth and erc20, the length is 1, for erc721 and erc1155, the length could be > 1
MessageType MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
L1TokenAddress string `json:"l1_token_address"`
L2TokenAddress string `json:"l2_token_address"`
BlockNumber uint64 `json:"block_number"`
TxStatus TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
CounterpartChainTx *CounterpartChainTx `json:"counterpart_chain_tx"`
ClaimInfo *ClaimInfo `json:"claim_info"`
BlockTimestamp uint64 `json:"block_timestamp"`
BatchDepositFee string `json:"batch_deposit_fee"` // only for bridge batch deposit
}
// RenderJSON renders response with json
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: errCode,
ErrMsg: errMsg,
Data: data,
}
ctx.JSON(http.StatusOK, renderData)
}
// RenderSuccess renders success response with json
func RenderSuccess(ctx *gin.Context, data interface{}) {
RenderJSON(ctx, Success, nil, data)
}
// RenderFailure renders failure response with json
func RenderFailure(ctx *gin.Context, errCode int, err error) {
RenderJSON(ctx, errCode, err, nil)
}
// RenderFatal renders fatal response with json
func RenderFatal(ctx *gin.Context, err error) {
var errMsg string
if err != nil {
errMsg = err.Error()
}
renderData := Response{
ErrCode: InternalServerError,
ErrMsg: errMsg,
Data: nil,
}
ctx.Set("errcode", InternalServerError)
ctx.JSON(http.StatusInternalServerError, renderData)
}
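A hypothetical handler (placed in this package for illustration; the health check is a stand-in) showing how the helpers above are intended to be used:

// getHealth illustrates RenderSuccess / RenderFatal; checkDependencies is a
// stand-in for a real dependency check.
func getHealth(ctx *gin.Context) {
    if err := checkDependencies(); err != nil {
        RenderFatal(ctx, err)
        return
    }
    RenderSuccess(ctx, gin.H{"healthy": true})
}

func checkDependencies() error {
    // Hypothetical: ping the database, RPC endpoints, etc.
    return nil
}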

View File

@@ -1,208 +0,0 @@
package utils
import (
"context"
"encoding/binary"
"errors"
"fmt"
"math/big"
"strings"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
backendabi "scroll-tech/bridge-history-api/abi"
)
// Keccak2 computes the keccak256 hash of the concatenation of two bytes32 values.
func Keccak2(a common.Hash, b common.Hash) common.Hash {
return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
}
// GetBlockNumber gets the current block number minus the confirmations.
func GetBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
number, err := client.BlockNumber(ctx)
if err != nil || number <= confirmations {
return 0, err
}
number = number - confirmations
return number, nil
}
// UnpackLog unpacks a retrieved log into the provided output structure.
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return errors.New("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
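A sketch of how UnpackLog fills a struct from a log, placed in this utils package for illustration; the Transfer event, its ABI, and the struct below are hypothetical and not part of the bridge contracts.

// transferEventABI describes a hypothetical Transfer(address indexed from, address indexed to, uint256 value) event.
const transferEventABI = `[{"anonymous":false,"inputs":[{"indexed":true,"name":"from","type":"address"},{"indexed":true,"name":"to","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"}]`

type transferEvent struct {
    From  common.Address
    To    common.Address
    Value *big.Int
}

// decodeTransfer unpacks a Transfer log into transferEvent using UnpackLog.
func decodeTransfer(vLog types.Log) (*transferEvent, error) {
    parsedABI, err := abi.JSON(strings.NewReader(transferEventABI))
    if err != nil {
        return nil, err
    }
    var event transferEvent
    if err := UnpackLog(&parsedABI, &event, "Transfer", vLog); err != nil {
        return nil, err
    }
    return &event, nil
}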
// ComputeMessageHash computes the message hash.
func ComputeMessageHash(
sender common.Address,
target common.Address,
value *big.Int,
messageNonce *big.Int,
message []byte,
) common.Hash {
data, _ := backendabi.IL2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
return common.BytesToHash(crypto.Keccak256(data))
}
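For instance, the hash of a hypothetical message with placeholder sender, target, value, and nonce could be computed as follows (a sketch placed in this package; the inputs are illustrative only and real values come from the parsed message fields):

// exampleMessageHash computes the hash of a hypothetical cross-chain message
// with placeholder inputs.
func exampleMessageHash() common.Hash {
    sender := common.HexToAddress("0x1111111111111111111111111111111111111111")
    target := common.HexToAddress("0x2222222222222222222222222222222222222222")
    return ComputeMessageHash(sender, target, big.NewInt(0), big.NewInt(42), []byte{})
}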
// GetBatchRangeFromCalldata finds the block range from calldata; both ends are inclusive.
func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
const methodIDLength = 4
if len(txData) < methodIDLength {
return 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
}
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
if err != nil {
return 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
}
values, err := method.Inputs.Unpack(txData[methodIDLength:])
if err != nil {
return 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
}
var chunks [][]byte
if method.Name == "importGenesisBatch" {
return 0, 0, nil
} else if method.Name == "commitBatch" {
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
var args commitBatchArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
} else if method.Name == "commitBatchWithBlobProof" {
type commitBatchWithBlobProofArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
BlobDataProof []byte
}
var args commitBatchWithBlobProofArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
}
var startBlock uint64
var finishBlock uint64
// decode blocks from chunk and assume that there's no empty chunk
// | 1 byte | 60 bytes | ... | 60 bytes |
// | num blocks | block 1 | ... | block n |
if len(chunks) == 0 {
return 0, 0, errors.New("invalid chunks")
}
chunk := chunks[0]
block := chunk[1:61] // first block in chunk
startBlock = binary.BigEndian.Uint64(block[0:8])
chunk = chunks[len(chunks)-1]
lastBlockIndex := int(chunk[0]) - 1
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
finishBlock = binary.BigEndian.Uint64(block[0:8])
return startBlock, finishBlock, err
}
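The chunk layout consumed above (a 1-byte block count followed by 60-byte block contexts whose first 8 bytes are the big-endian block number) can be illustrated with a small test-style helper; this is a sketch placed in this package, not production code.

// buildSingleBlockChunk builds a minimal chunk containing one block context,
// matching the layout decoded by GetBatchRangeFromCalldata:
// | 1 byte: num blocks | 60 bytes per block, first 8 bytes = block number |
func buildSingleBlockChunk(blockNumber uint64) []byte {
    chunk := make([]byte, 1+60)
    chunk[0] = 1 // one block in this chunk
    binary.BigEndian.PutUint64(chunk[1:9], blockNumber)
    return chunk
}

// Decoding it back the same way: binary.BigEndian.Uint64(chunk[1:9]) == blockNumber.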
// GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
func GetBlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.Block, error) {
var (
eg errgroup.Group
blocks = make([]*types.Block, end-start+1)
concurrency = 32
sem = make(chan struct{}, concurrency)
)
for i := start; i <= end; i++ {
sem <- struct{}{} // Acquire a slot in the semaphore
blockNum := int64(i)
index := i - start
eg.Go(func() error {
defer func() { <-sem }() // Release the slot when done
block, err := cli.BlockByNumber(ctx, big.NewInt(blockNum))
if err != nil {
log.Error("Failed to fetch block number", "number", blockNum, "error", err)
return err
}
blocks[index] = block
return nil
})
}
if err := eg.Wait(); err != nil {
log.Error("Error waiting for block fetching routines", "error", err)
return nil, err
}
return blocks, nil
}
// ConvertBigIntArrayToString converts a big int array to a comma-separated string.
func ConvertBigIntArrayToString(array []*big.Int) string {
stringArray := make([]string, len(array))
for i, num := range array {
stringArray[i] = num.String()
}
result := strings.Join(stringArray, ", ")
return result
}
// ConvertStringToStringArray takes a string with values separated by commas and returns a slice of strings
func ConvertStringToStringArray(s string) []string {
if s == "" {
return []string{}
}
stringParts := strings.Split(s, ",")
for i, part := range stringParts {
stringParts[i] = strings.TrimSpace(part)
}
return stringParts
}
// GetSkippedQueueIndices gets the skipped queue indices
func GetSkippedQueueIndices(startIndex uint64, skippedBitmap *big.Int) []uint64 {
var indices []uint64
for i := 0; i < 256; i++ {
index := startIndex + uint64(i)
bit := new(big.Int).Rsh(skippedBitmap, uint(i))
if bit.Bit(0) == 0 {
continue
}
indices = append(indices, index)
}
return indices
}
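For example, a bitmap with bits 0 and 2 set marks the queue indices startIndex and startIndex+2 as skipped; a small sketch placed in this package for illustration:

// exampleSkippedIndices shows how a skipped bitmap maps to queue indices:
// bits 0 and 2 set with startIndex 100 yields [100, 102].
func exampleSkippedIndices() []uint64 {
    skippedBitmap := big.NewInt(0b101)
    return GetSkippedQueueIndices(100, skippedBitmap)
}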

File diff suppressed because one or more lines are too long

View File

@@ -1,188 +0,0 @@
package utils
import (
"github.com/scroll-tech/go-ethereum/common"
)
// MaxHeight is the maximum possible height of the withdrawal trie
const MaxHeight = 40
// WithdrawTrie is an append-only merkle trie
type WithdrawTrie struct {
// used to rebuild the merkle tree
NextMessageNonce uint64
height int // current height of withdraw trie
branches []common.Hash
zeroes []common.Hash
}
// NewWithdrawTrie will return a new instance of WithdrawTrie
func NewWithdrawTrie() *WithdrawTrie {
zeroes := make([]common.Hash, MaxHeight)
branches := make([]common.Hash, MaxHeight)
zeroes[0] = common.Hash{}
for i := 1; i < MaxHeight; i++ {
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
}
return &WithdrawTrie{
zeroes: zeroes,
branches: branches,
height: -1,
NextMessageNonce: 0,
}
}
// Initialize initializes the merkle trie with the rightmost leaf node.
func (w *WithdrawTrie) Initialize(currentMessageNonce uint64, msgHash common.Hash, proofBytes []byte) {
proof := decodeBytesToMerkleProof(proofBytes)
branches := recoverBranchFromProof(proof, currentMessageNonce, msgHash)
w.height = len(proof)
w.branches = branches
w.NextMessageNonce = currentMessageNonce + 1
}
// AppendMessages appends a list of new messages as leaf nodes at the rightmost position of the tree and returns the proofs for all messages.
// The function correctly returns the proofs for the entire tree after all messages have been inserted, not the individual proofs after each insertion.
func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
length := len(hashes)
if length == 0 {
return make([][]byte, 0)
}
cache := make([]map[uint64]common.Hash, MaxHeight)
for h := 0; h < MaxHeight; h++ {
cache[h] = make(map[uint64]common.Hash)
}
// cache all branches that will be used later.
if w.NextMessageNonce != 0 {
index := w.NextMessageNonce
for h := 0; h <= w.height; h++ {
if index%2 == 1 {
// right child, `w.branches[h]` is the corresponding left child
// the index of left child should be `index ^ 1`.
cache[h][index^1] = w.branches[h]
}
index >>= 1
}
}
// cache all new leaves
for i := 0; i < length; i++ {
cache[0][w.NextMessageNonce+uint64(i)] = hashes[i]
}
// build withdraw trie with new hashes
minIndex := w.NextMessageNonce
maxIndex := w.NextMessageNonce + uint64(length) - 1
for h := 0; maxIndex > 0; h++ {
if minIndex%2 == 1 {
minIndex--
}
if maxIndex%2 == 0 {
cache[h][maxIndex^1] = w.zeroes[h]
}
for i := minIndex; i <= maxIndex; i += 2 {
cache[h+1][i>>1] = Keccak2(cache[h][i], cache[h][i^1])
}
minIndex >>= 1
maxIndex >>= 1
}
// update branches using hashes one by one
for i := 0; i < length; i++ {
proof := updateBranchWithNewMessage(w.zeroes, w.branches, w.NextMessageNonce, hashes[i])
w.NextMessageNonce++
w.height = len(proof)
}
proofs := make([][]byte, length)
// retrieve merkle proof from cache
for i := 0; i < length; i++ {
index := w.NextMessageNonce + uint64(i) - uint64(length)
var merkleProof []common.Hash
for h := 0; h < w.height; h++ {
merkleProof = append(merkleProof, cache[h][index^1])
index >>= 1
}
proofs[i] = encodeMerkleProofToBytes(merkleProof)
}
return proofs
}
// MessageRoot returns the current root hash of the withdraw trie.
func (w *WithdrawTrie) MessageRoot() common.Hash {
if w.height == -1 {
return common.Hash{}
}
return w.branches[w.height]
}
// decodeBytesToMerkleProof converts a byte array to a bytes32 array. The caller should make sure the length is a multiple of 32.
func decodeBytesToMerkleProof(proofBytes []byte) []common.Hash {
proof := make([]common.Hash, len(proofBytes)/32)
for i := 0; i < len(proofBytes); i += 32 {
proof[i/32] = common.BytesToHash(proofBytes[i : i+32])
}
return proof
}
// encodeMerkleProofToBytes converts a bytes32 array to a byte array by concatenation.
func encodeMerkleProofToBytes(proof []common.Hash) []byte {
var proofBytes []byte
for i := 0; i < len(proof); i++ {
proofBytes = append(proofBytes, proof[i][:]...)
}
return proofBytes
}
// updateBranchWithNewMessage updates the branches with the new message and returns the merkle proof for the message.
func updateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
root := msgHash
var merkleProof []common.Hash
var height uint64
for height = 0; index > 0; height++ {
if index%2 == 0 {
// it may be used in next round.
branches[height] = root
merkleProof = append(merkleProof, zeroes[height])
// it's a left child, the right child must be null
root = Keccak2(root, zeroes[height])
} else {
// it's a right child, use previously computed hash
root = Keccak2(branches[height], root)
merkleProof = append(merkleProof, branches[height])
}
index >>= 1
}
branches[height] = root
return merkleProof
}
// recoverBranchFromProof recovers the latest branches from the merkle proof and message hash
func recoverBranchFromProof(proof []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
branches := make([]common.Hash, 64)
root := msgHash
var height uint64
for height = 0; index > 0; height++ {
if index%2 == 0 {
branches[height] = root
// it's a left child, the right child must be null
root = Keccak2(root, proof[height])
} else {
// it's a right child, use previously computed hash
branches[height] = proof[height]
root = Keccak2(proof[height], root)
}
index >>= 1
}
branches[height] = root
for height++; height < 64; height++ {
branches[height] = common.Hash{}
}
return branches
}
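
To make the branch/proof relationship above concrete, here is a small self-contained sketch of the same verification walk; `keccak2` below stands in for the `Keccak2` helper (assumed to be the keccak256 hash of the two concatenated child hashes), so this is an illustration rather than the package's own API:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// keccak2 hashes the concatenation of two child hashes (assumed to match Keccak2).
func keccak2(a, b common.Hash) common.Hash {
	return common.BytesToHash(crypto.Keccak256(append(a.Bytes(), b.Bytes()...)))
}

func main() {
	// A two-leaf trie: the root is the hash of the two leaves.
	leaf0 := common.BigToHash(common.Big1)
	leaf1 := common.BigToHash(common.Big2)
	root := keccak2(leaf0, leaf1)

	// The merkle proof for leaf0 (index 0) is just its sibling, leaf1.
	proof := []common.Hash{leaf1}

	// Walk the proof upwards, exactly like the branch/proof loops above:
	// an even index is a left child, an odd index is a right child.
	index, node := uint64(0), leaf0
	for _, sibling := range proof {
		if index%2 == 0 {
			node = keccak2(node, sibling)
		} else {
			node = keccak2(sibling, node)
		}
		index >>= 1
	}
	fmt.Println(node == root) // true
}
```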

View File

@@ -1,210 +0,0 @@
package utils
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestUpdateBranchWithNewMessage(t *testing.T) {
zeroes := make([]common.Hash, 64)
branches := make([]common.Hash, 64)
zeroes[0] = common.Hash{}
for i := 1; i < 64; i++ {
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
}
updateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
if branches[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") {
t.Fatalf("Invalid root, want %s, got %s", "0x0000000000000000000000000000000000000000000000000000000000000001", branches[0].Hex())
}
updateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if branches[1] != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid root, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", branches[1].Hex())
}
updateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
if branches[2] != common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c") {
t.Fatalf("Invalid root, want %s, got %s", "0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c", branches[2].Hex())
}
updateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
if branches[2] != common.HexToHash("0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36") {
t.Fatalf("Invalid root, want %s, got %s", "0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36", branches[2].Hex())
}
}
func TestDecodeEncodeMerkleProof(t *testing.T) {
proof := decodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
if len(proof) != 4 {
t.Fatalf("proof length mismatch, want %d, got %d", 4, len(proof))
}
if proof[0] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901") {
t.Fatalf("proof[0] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901", proof[0].Hex())
}
if proof[1] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902") {
t.Fatalf("proof[1] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902", proof[0].Hex())
}
if proof[2] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903") {
t.Fatalf("proof[2] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903", proof[0].Hex())
}
if proof[3] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904") {
t.Fatalf("proof[3] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904", proof[0].Hex())
}
bytes := encodeMerkleProofToBytes(proof)
if common.Bytes2Hex(bytes) != "2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904" {
t.Fatalf("wrong encoded bytes")
}
}
func TestRecoverBranchFromProof(t *testing.T) {
zeroes := make([]common.Hash, 64)
branches := make([]common.Hash, 64)
zeroes[0] = common.Hash{}
for i := 1; i < 64; i++ {
zeroes[i] = Keccak2(zeroes[i-1], zeroes[i-1])
}
proof := updateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
tmpBranches := recoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = updateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
tmpBranches = recoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = updateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
tmpBranches = recoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
proof = updateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
tmpBranches = recoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}
}
func TestWithdrawTrieOneByOne(t *testing.T) {
for initial := 0; initial < 128; initial++ {
withdrawTrie := NewWithdrawTrie()
var hashes []common.Hash
for i := 0; i < initial; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
withdrawTrie.AppendMessages([]common.Hash{
hash,
})
}
for i := initial; i < 128; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
expectedRoot := computeMerkleRoot(hashes)
proofBytes := withdrawTrie.AppendMessages([]common.Hash{
hash,
})
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(i+1))
assert.Equal(t, expectedRoot.String(), withdrawTrie.MessageRoot().String())
proof := decodeBytesToMerkleProof(proofBytes[0])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoot.String(), verifiedRoot.String())
}
}
}
func TestWithdrawTrieMultiple(t *testing.T) {
var expectedRoots []common.Hash
{
var hashes []common.Hash
for i := 0; i < 128; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
expectedRoots = append(expectedRoots, computeMerkleRoot(hashes))
}
}
for initial := 0; initial < 100; initial++ {
var hashes []common.Hash
for i := 0; i < initial; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
hashes = append(hashes, hash)
}
for finish := initial; finish < 100; finish++ {
withdrawTrie := NewWithdrawTrie()
withdrawTrie.AppendMessages(hashes)
var newHashes []common.Hash
for i := initial; i <= finish; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
newHashes = append(newHashes, hash)
}
proofBytes := withdrawTrie.AppendMessages(newHashes)
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(finish+1))
assert.Equal(t, expectedRoots[finish].String(), withdrawTrie.MessageRoot().String())
for i := initial; i <= finish; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
proof := decodeBytesToMerkleProof(proofBytes[i-initial])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoots[finish].String(), verifiedRoot.String())
}
}
}
}
func verifyMerkleProof(index uint64, leaf common.Hash, proof []common.Hash) common.Hash {
root := leaf
for _, h := range proof {
if index%2 == 0 {
root = Keccak2(root, h)
} else {
root = Keccak2(h, root)
}
index >>= 1
}
return root
}
func computeMerkleRoot(hashes []common.Hash) common.Hash {
if len(hashes) == 0 {
return common.Hash{}
}
zeroHash := common.Hash{}
for {
if len(hashes) == 1 {
break
}
var newHashes []common.Hash
for i := 0; i < len(hashes); i += 2 {
if i+1 < len(hashes) {
newHashes = append(newHashes, Keccak2(hashes[i], hashes[i+1]))
} else {
newHashes = append(newHashes, Keccak2(hashes[i], zeroHash))
}
}
hashes = newHashes
zeroHash = Keccak2(zeroHash, zeroHash)
}
return hashes[0]
}

31
bridge/Makefile Normal file
View File

@@ -0,0 +1,31 @@
.PHONY: lint docker clean bridge
IMAGE_NAME=bridge
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
mock_abi:
solc --bin mock_bridge/Mock_Bridge.sol -o mock_bridge/ --overwrite
solc --abi mock_bridge/Mock_Bridge.sol -o mock_bridge/ --overwrite
abigen --bin=mock_bridge/Mock_Bridge.bin \
--abi=mock_bridge/Mock_Bridge.abi \
--pkg=mock_bridge --out=mock_bridge/Mock_Bridge.go
awk '{sub("github.com/ethereum","github.com/scroll-tech")}1' mock_bridge/Mock_Bridge.go > temp && mv temp mock_bridge/Mock_Bridge.go
bridge: ## Builds the Bridge instance.
go build -o $(PWD)/build/bin/bridge ./cmd
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder
@rm -rf build/bin
docker:
docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridge.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}

68
bridge/README.md Normal file
View File

@@ -0,0 +1,68 @@
# Bridge
[![Actions Status](https://scroll-tech/bridge/workflows/Continuous%20Integration/badge.svg)](https://scroll-tech/bridge/actions)
[![codecov](https://codecov.io/gh/scroll-tech/bridge/branch/master/graph/badge.svg)](https://codecov.io/gh/scroll-tech/bridge)
This repo contains the Scroll bridge.
In addition, launching the bridge will launch a separate instance of l2geth and set up a communication channel
between the two over JSON-RPC sockets.
## Dependency
+ install `abigen`
``` bash
go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
```
## Build
```bash
make clean
make bridge
```
## DB operations
* init, show version, roll back, check status, and migrate the db
```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
# TEST_DB_DRIVER, TEST_DB_DSN: It is required when executing db test cases
export TEST_DB_DRIVER="postgres"
export TEST_DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# init db
./build/bin/bridge reset [--config ./config.json]
# show db version
./build/bin/bridge version [--config ./config.json]
# rollback db
./build/bin/bridge rollback [--version version] [--config ./config.json]
# show db status
./build/bin/bridge status [--config ./config.json]
# migrate db
./build/bin/bridge migrate [--config ./config.json]
```
## Start
* use default ports and config.json
```bash
./build/bin/bridge --http
```
* use specified ports and config.json
```bash
./build/bin/bridge --config ./config.json --http --http.addr localhost --http.port 8290
```

64
bridge/abi/bridge_abi.go Normal file
View File

@@ -0,0 +1,64 @@
package bridgeabi
import (
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
)
// Generated manually with abigen; only the necessary events and mutable calls are kept.
// RollupMetaData contains all meta data concerning the Rollup contract.
var RollupMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"_blockHeight\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"_parentHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBlock\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"_blockHeight\",\"type\":\"uint64\"}],\"name\":\"FinalizeBlock\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBlock\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_deadline\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"appendMessage\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockHeight\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"gasUsed\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"internalType\":\"struct IZKRollup.BlockHeader\",\"name\":\"_header\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"caller\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"nonce\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"gas\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"struct 
IZKRollup.Layer2Transaction[]\",\"name\":\"_txns\",\"type\":\"tuple[]\"}],\"name\":\"commitBlock\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256[]\",\"name\":\"_proof\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"_instances\",\"type\":\"uint256[]\"}],\"name\":\"finalizeBlockWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_index\",\"type\":\"uint256\"}],\"name\":\"getMessageHashByIndex\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getNextQueueIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_blockNumber\",\"type\":\"uint256\"}],\"name\":\"layer2GasLimit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_blockHash\",\"type\":\"bytes32\"}],\"name\":\"revertBlock\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_blockNumber\",\"type\":\"uint256\"}],\"name\":\"verifyMessageStateProof\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L1GatewayMetaData contains all meta data concerning the L1Gateway contract.
var L1GatewayMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l1Token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l2Token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"DepositERC20\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"DepositETH\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l1Token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l2Token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"FinalizeWithdrawERC20\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"FinalizeWithdrawETH\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"counterpart\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_token\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"depositERC20\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_token\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"depositERC20\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_token\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"depositERC20AndCall\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"typ
e\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"depositETH\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"depositETH\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"}]",
}
// L1MessengerMetaData contains all meta data concerning the L1Messenger contract.
var L1MessengerMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"msgHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"msgHash\",\"type\":\"bytes32\"}],\"name\":\"MessageDropped\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"msgHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"deadline\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_deadline\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"dropMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_deadline\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"merkleProof\",\"type\":\"bytes\"}],\"internalType\":\"struct 
IL1ScrollMessenger.L2MessageProof\",\"name\":\"_proof\",\"type\":\"tuple\"}],\"name\":\"relayMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_deadline\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"_oldGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_newGasLimit\",\"type\":\"uint32\"}],\"name\":\"replayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// L2GatewayMetaData contains all meta data concerning the L2Gateway contract.
var L2GatewayMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l1Token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l2Token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"FinalizeDepositERC20\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"FinalizeDepositETH\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l1Token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l2Token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"WithdrawERC20\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"WithdrawETH\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_token\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"withdrawERC20\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_token\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"withdrawERC20\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_token\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"withdrawERC20AndCall\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"withdrawETH\",\"outputs\":[],\"stateMutability\":\"payable\",\"ty
pe\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"withdrawETH\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"}]",
}
// L2MessengerMetaData contains all meta data concerning the L2Messenger contract.
var L2MessengerMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"msgHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"msgHash\",\"type\":\"bytes32\"}],\"name\":\"MessageDropped\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"deadline\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_deadline\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"type\":\"bytes\"}],\"name\":\"relayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
}
// IZKRollupBlockHeader is an auto generated low-level Go binding around an user-defined struct.
type IZKRollupBlockHeader struct {
BlockHash [32]byte
ParentHash [32]byte
BaseFee *big.Int
StateRoot [32]byte
BlockHeight uint64
GasUsed uint64
Timestamp uint64
ExtraData []byte
}
// IZKRollupLayer2Transaction is an auto generated low-level Go binding around an user-defined struct.
type IZKRollupLayer2Transaction struct {
Caller common.Address
Nonce uint64
Target common.Address
Gas uint64
GasPrice *big.Int
Value *big.Int
Data []byte
}
// IL1ScrollMessengerL2MessageProof is an auto generated low-level Go binding around an user-defined struct.
type IL1ScrollMessengerL2MessageProof struct {
BlockNumber *big.Int
MerkleProof []byte
}

View File

@@ -0,0 +1,84 @@
package bridgeabi_test
import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
bridge_abi "scroll-tech/bridge/abi"
)
func TestPackRelayMessageWithProof(t *testing.T) {
assert := assert.New(t)
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
assert.NoError(err)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockNumber: big.NewInt(0),
MerkleProof: make([]byte, 0),
}
_, err = l1MessengerABI.Pack("relayMessageWithProof", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0), proof)
assert.NoError(err)
}
func TestPackCommitBlock(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
assert.NoError(err)
header := bridge_abi.IZKRollupBlockHeader{
BlockHash: common.Hash{},
ParentHash: common.Hash{},
BaseFee: big.NewInt(0),
StateRoot: common.Hash{},
BlockHeight: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: make([]byte, 0),
}
txns := make([]bridge_abi.IZKRollupLayer2Transaction, 5)
for i := 0; i < 5; i++ {
txns[i] = bridge_abi.IZKRollupLayer2Transaction{
Caller: common.Address{},
Target: common.Address{},
Nonce: 0,
Gas: 0,
GasPrice: big.NewInt(0),
Value: big.NewInt(0),
Data: make([]byte, 0),
}
}
_, err = l1RollupABI.Pack("commitBlock", header, txns)
assert.NoError(err)
}
func TestPackFinalizeBlockWithProof(t *testing.T) {
assert := assert.New(t)
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
assert.NoError(err)
proof := make([]*big.Int, 10)
instance := make([]*big.Int, 10)
for i := 0; i < 10; i++ {
proof[i] = big.NewInt(0)
instance[i] = big.NewInt(0)
}
_, err = l1RollupABI.Pack("finalizeBlockWithProof", common.Hash{}, proof, instance)
assert.NoError(err)
}
func TestPackRelayMessage(t *testing.T) {
assert := assert.New(t)
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
assert.NoError(err)
_, err = l2MessengerABI.Pack("relayMessage", common.Address{}, common.Address{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), make([]byte, 0))
assert.NoError(err)
}

88
bridge/cmd/db_client.go Normal file
View File

@@ -0,0 +1,88 @@
package main
import (
"scroll-tech/bridge/config"
"github.com/jmoiron/sqlx"
"github.com/urfave/cli/v2"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func initDB(file string) (*sqlx.DB, error) {
cfg, err := config.NewConfig(file)
if err != nil {
return nil, err
}
dbCfg := cfg.DBConfig
factory, err := database.NewOrmFactory(dbCfg)
if err != nil {
return nil, err
}
log.Debug("Got db config from env", "driver name", dbCfg.DriverName, "dsn", dbCfg.DSN)
return factory.GetDB(), nil
}
// ResetDB cleans or resets the database.
func ResetDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
var version int64
err = migrate.Rollback(db.DB, &version)
if err != nil {
return err
}
log.Info("successful to reset", "init version", version)
return nil
}
// CheckDBStatus checks the db migration status
func CheckDBStatus(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
return migrate.Status(db.DB)
}
// DBVersion returns the current db version
func DBVersion(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
version, err := migrate.Current(db.DB)
log.Info("show database version", "db version", version)
return err
}
// MigrateDB migrates the db to the latest version
func MigrateDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
return migrate.Migrate(db.DB)
}
// RollbackDB rolls the db back to the specified version
func RollbackDB(ctx *cli.Context) error {
db, err := initDB(ctx.String(configFileFlag.Name))
if err != nil {
return err
}
version := ctx.Int64("version")
return migrate.Rollback(db.DB, &version)
}

93
bridge/cmd/flags.go Normal file
View File

@@ -0,0 +1,93 @@
package main
import "github.com/urfave/cli/v2"
var (
commonFlags = []cli.Flag{
&configFileFlag,
&verbosityFlag,
&logFileFlag,
&logJSONFormat,
&logDebugFlag,
}
// configFileFlag loads the JSON config file.
configFileFlag = cli.StringFlag{
Name: "config",
Usage: "JSON configuration file",
Value: "./config.json",
}
// verbosityFlag log level.
verbosityFlag = cli.IntFlag{
Name: "verbosity",
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
Value: 3,
}
// logFileFlag decides where the logger output is sent. If this flag is left
// empty, it will log to stdout.
logFileFlag = cli.StringFlag{
Name: "log.file",
Usage: "Tells the sequencer where to write log entries",
}
logJSONFormat = cli.BoolFlag{
Name: "log.json",
Usage: "Tells the sequencer whether log format is json or not",
Value: true,
}
logDebugFlag = cli.BoolFlag{
Name: "log.debug",
Usage: "Prepends log messages with call-site location (file and line number)",
}
apiFlags = []cli.Flag{
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enables the rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
l1Flags = []cli.Flag{
&l1ChainIDFlag,
&l1UrlFlag,
}
l1ChainIDFlag = cli.IntFlag{
Name: "l1.chainID",
Usage: "l1 chain id",
Value: 4,
}
l1UrlFlag = cli.StringFlag{
Name: "l1.endpoint",
Usage: "The endpoint connect to l1chain node",
Value: "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
}
l2Flags = []cli.Flag{
&l2ChainIDFlag,
&l2UrlFlag,
}
l2ChainIDFlag = cli.IntFlag{
Name: "l2.chainID",
Usage: "l2 chain id",
Value: 53077,
}
l2UrlFlag = cli.StringFlag{
Name: "l2.endpoint",
Usage: "The endpoint connect to l2chain node",
Value: "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
}
)

191
bridge/cmd/main.go Normal file
View File

@@ -0,0 +1,191 @@
package main
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/database"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
func main() {
// Set up Bridge app info.
app := cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, commonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Flags = append(app.Flags, l1Flags...)
app.Flags = append(app.Flags, l2Flags...)
app.Before = func(ctx *cli.Context) error {
return utils.Setup(&utils.LogConfig{
LogFile: ctx.String(logFileFlag.Name),
LogJSONFormat: ctx.Bool(logJSONFormat.Name),
LogDebug: ctx.Bool(logDebugFlag.Name),
Verbosity: ctx.Int(verbosityFlag.Name),
})
}
app.Commands = []*cli.Command{
{
Name: "reset",
Usage: "Clean and reset database.",
Action: ResetDB,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "status",
Usage: "Check migration status.",
Action: CheckDBStatus,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "version",
Usage: "Display the current database version.",
Action: DBVersion,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: MigrateDB,
Flags: []cli.Flag{
&configFileFlag,
},
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: RollbackDB,
Flags: []cli.Flag{
&configFileFlag,
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",
Value: 0,
},
},
},
}
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func applyConfig(ctx *cli.Context, cfg *config.Config) {
if ctx.IsSet(l1ChainIDFlag.Name) {
cfg.L1Config.ChainID = ctx.Int64(l1ChainIDFlag.Name)
}
if ctx.IsSet(l1UrlFlag.Name) {
cfg.L1Config.Endpoint = ctx.String(l1UrlFlag.Name)
}
if ctx.IsSet(l2ChainIDFlag.Name) {
cfg.L2Config.ChainID = ctx.Int64(l2ChainIDFlag.Name)
}
if ctx.IsSet(l2UrlFlag.Name) {
cfg.L2Config.Endpoint = ctx.String(l2UrlFlag.Name)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(configFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
applyConfig(ctx, cfg)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
srv := rpc.NewServer()
apis := l2Backend.APIs()
for _, api := range apis {
if err = srv.RegisterName(api.Namespace, api.Service); err != nil {
log.Crit("register namespace failed", "namespace", api.Namespace, "error", err)
}
}
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
rpc.DefaultHTTPTimeouts,
srv)
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}

53
bridge/config.json Normal file
View File

@@ -0,0 +1,53 @@
{
"l1_config": {
"confirmations": 6,
"chain_id": 4,
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"start_height": 0,
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"check_pending_time": 3,
"escalate_blocks": 100,
"confirmations": 1,
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "AccessListTx"
},
"private_key": "abc123abc123abc123abc123abc123abc123abc123abc123abc123abc123abc1"
}
},
"l2_config": {
"confirmations": 1,
"chain_id": 53077,
"proof_generation_freq": 1,
"skipped_opcodes": [
"CREATE2",
"DELEGATECALL"
],
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"l2_messenger_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"check_pending_time": 10,
"escalate_blocks": 100,
"confirmations": 6,
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "DynamicFeeTx"
},
"private_key": "1212121212121212121212121212121212121212121212121212121212121212"
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
}
}

106
bridge/config/config.go Normal file
View File

@@ -0,0 +1,106 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
"github.com/scroll-tech/go-ethereum/common"
"scroll-tech/common/utils"
db_config "scroll-tech/database"
)
// SenderConfig is the config for the transaction sender
type SenderConfig struct {
// The RPC endpoint of the ethereum or scroll public node.
Endpoint string `json:"endpoint"`
// The interval at which the sender checks its pending txs.
CheckPendingTime uint64 `json:"check_pending_time"`
// The number of blocks to wait before escalating (increasing) the gas price of the transaction.
EscalateBlocks uint64 `json:"escalate_blocks"`
// The number of blocks between a block being considered confirmed and the latest block.
Confirmations uint64 `json:"confirmations"`
// The numerator of gas price escalate multiple.
EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
// The denominator of gas price escalate multiple.
EscalateMultipleDen uint64 `json:"escalate_multiple_den"`
// The maximum gas price that can be used to send a transaction.
MaxGasPrice uint64 `json:"max_gas_price"`
// the transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
TxType string `json:"tx_type"`
}
// L1Config loads l1eth configuration items.
type L1Config struct {
// Confirmations is the number of block confirmations required.
Confirmations uint64 `json:"confirmations"`
// l1 chainID.
ChainID int64 `json:"chain_id"`
// l1 eth node url.
Endpoint string `json:"endpoint"`
// The start height to sync event from layer 1
StartHeight uint64 `json:"start_height"`
// The messenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address,omitempty"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}
// L2Config loads l2geth configuration items.
type L2Config struct {
// Confirmations is the number of block confirmations required.
Confirmations uint64 `json:"confirmations"`
// l2geth chainId.
ChainID int64 `json:"chain_id"`
// l2geth node url.
Endpoint string `json:"endpoint"`
// The messenger contract address deployed on layer 2 chain.
L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
// Proof generation frequency, generating proof every k blocks
ProofGenerationFreq uint64 `json:"proof_generation_freq"`
// Skip generating proofs when these opcodes appear
SkippedOpcodes []string `json:"skipped_opcodes"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}
// RelayerConfig loads relayer configuration items.
type RelayerConfig struct {
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// MessengerContractAddress store the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// sender config
SenderConfig *SenderConfig `json:"sender_config"`
// The private key of the relayer
PrivateKey string `json:"private_key"`
}
// Config load configuration items.
type Config struct {
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *db_config.DBConfig `json:"db_config"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &Config{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
// override values with env variables when set
cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
return cfg, nil
}
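
As a usage sketch (not part of the diff): NewConfig reads the JSON file and then lets DB_DSN / DB_DRIVER override the file values, so a deployment can keep credentials out of config.json. The import path below follows the bridge module layout used in cmd/main.go:

```go
package main

import (
	"fmt"
	"os"

	"scroll-tech/bridge/config"
)

func main() {
	// Environment variables, when set, take precedence over config.json.
	os.Setenv("DB_DSN", "postgres://admin:123456@localhost/test?sslmode=disable")
	os.Setenv("DB_DRIVER", "postgres")

	cfg, err := config.NewConfig("./config.json")
	if err != nil {
		panic(err)
	}
	fmt.Println("l1 endpoint:", cfg.L1Config.Endpoint)
	fmt.Println("db driver:  ", cfg.DBConfig.DriverName)
}
```

Within SenderConfig, escalate_multiple_num/escalate_multiple_den (11/10 in the sample config) presumably describe the multiplier applied when re-bidding a stuck transaction's gas price, bounded by max_gas_price.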

42
bridge/go.mod Normal file
View File

@@ -0,0 +1,42 @@
module scroll-tech/bridge
go 1.18
require (
github.com/ethereum/go-ethereum v1.10.13
github.com/gorilla/websocket v1.4.2
github.com/jmoiron/sqlx v1.3.5
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.3.0
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.12 // indirect
github.com/lib/pq v1.10.6 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/russross/blackfriday/v2 v2.0.1 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

659
bridge/go.sum Normal file
View File

@@ -0,0 +1,659 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo=
github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y=
github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8=
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4=
github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0=
github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM=
github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13 h1:DEYFP9zk+Gruf3ae1JOJVhNmxK28ee+sMELPLgYTXpA=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/iden3/go-iden3-crypto v0.0.12 h1:dXZF+R9iI07DK49LHX/EKC3jTa0O2z+TUyvxjGK7V38=
github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d h1:eh1i1M9BKPCQckNFQsV/yfazQ895hevkWr8GuqhKNrk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221012120556-b3a7c9b6917d/go.mod h1:SkQ1431r0LkrExTELsapw6JQHYpki8O1mSsObTSmBWU=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

63
bridge/l1/backend.go Normal file
View File

@@ -0,0 +1,63 @@
package l1
import (
"context"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
)
// Backend manages the resources and services of the L1 backend.
// It monitors events on layer 1 and relays transactions to layer 2.
type Backend struct {
cfg *config.L1Config
watcher *Watcher
relayer *Layer1Relayer
orm orm.Layer1MessageOrm
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L1Config, orm orm.Layer1MessageOrm) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L1MessengerABI failed", "err", err)
return nil, err
}
relayer, err := NewLayer1Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, l1MessengerABI, orm)
return &Backend{
cfg: cfg,
watcher: watcher,
relayer: relayer,
orm: orm,
}, nil
}
// Start the Backend module.
func (l1 *Backend) Start() error {
l1.watcher.Start()
l1.relayer.Start()
return nil
}
// Stop the Backend module.
func (l1 *Backend) Stop() {
l1.watcher.Stop()
l1.relayer.Stop()
}
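The snippet below is a minimal, hedged wiring sketch (not part of this diff) showing how a caller might construct, start and stop this L1 backend. The *config.Config type name, its L1Config field, and passing a database.OrmFactory where an orm.Layer1MessageOrm is expected are assumptions inferred from the tests elsewhere in this PR.
package example

import (
    "context"

    "scroll-tech/bridge/config"
    "scroll-tech/bridge/l1"
    "scroll-tech/database"
)

// runL1Backend wires the L1 backend together and blocks until ctx is cancelled.
func runL1Backend(ctx context.Context, cfg *config.Config, db database.OrmFactory) error {
    // database.OrmFactory is assumed to satisfy orm.Layer1MessageOrm, as in the relayer test below.
    backend, err := l1.New(ctx, cfg.L1Config, db)
    if err != nil {
        return err
    }
    if err := backend.Start(); err != nil {
        return err
    }
    defer backend.Stop()
    <-ctx.Done()
    return nil
}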

150
bridge/l1/relayer.go Normal file
View File

@@ -0,0 +1,150 @@
package l1
import (
"context"
"math/big"
"time"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
)
// Layer1Relayer is responsible for
// 1. fetching pending Layer1Messages from the db
// 2. relaying pending messages to the layer 2 node
//
// Actions are triggered by a new head from the layer 1 geth node.
// @todo It's better to be triggered by the watcher.
type Layer1Relayer struct {
ctx context.Context
client *ethclient.Client
sender *sender.Sender
db orm.Layer1MessageOrm
cfg *config.RelayerConfig
// channel used to communicate with transaction sender
confirmationCh <-chan *sender.Confirmation
l2MessengerABI *abi.ABI
stopCh chan struct{}
}
// NewLayer1Relayer returns a new instance of Layer1Relayer
func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1ConfirmNum int64, db orm.Layer1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L2MessengerABI failed", "err", err)
return nil, err
}
prv, err := crypto.HexToECDSA(cfg.PrivateKey)
if err != nil {
log.Error("Failed to import private key from config file")
return nil, err
}
sender, err := sender.NewSender(ctx, cfg.SenderConfig, prv)
if err != nil {
log.Error("new sender failed", "err", err)
return nil, err
}
return &Layer1Relayer{
ctx: ctx,
client: ethClient,
sender: sender,
db: db,
l2MessengerABI: l2MessengerABI,
cfg: cfg,
stopCh: make(chan struct{}),
confirmationCh: sender.ConfirmChan(),
}, nil
}
// ProcessSavedEvents relays saved, unprocessed cross-domain messages to the destination chain
func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL1UnprocessedMessages()
if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return
}
if len(msgs) == 0 {
return
}
msg := msgs[0]
// @todo add support to relay multiple messages
sender := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
// @todo maybe panic?
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// TODO: need to skip this message by changing its status to MsgError
}
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l2MessengerABI.Pack("relayMessage", sender, target, value, fee, deadline, msgNonce, calldata)
if err != nil {
log.Error("Failed to pack relayMessage", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return
}
hash, err := r.sender.SendTransaction(msg.Layer1Hash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil {
log.Error("Failed to send relayMessage tx to L2", "msg.nonce", msg.Nonce, "msg.height", msg.Height, "err", err)
return
}
log.Info("relayMessage to layer2", "layer1 hash", msg.Layer1Hash, "tx hash", hash)
err = r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.Layer1Hash, hash.String(), orm.MsgSubmitted)
if err != nil {
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.layer1hash", msg.Layer1Hash, "msg.height", msg.Height, "err", err)
}
}
// Start the relayer process
func (r *Layer1Relayer) Start() {
go func() {
// trigger by timer
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// number, err := r.client.BlockNumber(r.ctx)
// log.Info("receive header", "height", number)
r.ProcessSavedEvents()
case cfm := <-r.confirmationCh: // @todo handle db error
err := r.db.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, cfm.TxHash.String(), orm.MsgConfirmed)
if err != nil {
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
case <-r.stopCh:
return
}
}
}()
}
// Stop the relayer module, for a graceful shutdown.
func (r *Layer1Relayer) Stop() {
close(r.stopCh)
}
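For reference, the standalone sketch below (not part of this diff) packs the same relayMessage call that ProcessSavedEvents builds above, to make the argument order explicit. The addresses and numeric values are arbitrary placeholders.
package main

import (
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"

    bridge_abi "scroll-tech/bridge/abi"
)

func main() {
    l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
    if err != nil {
        panic(err)
    }
    // placeholder message fields, mirroring the columns of orm.Layer1Message
    sender := common.HexToAddress("0x596a746661dbed76a84556111c2872249b070e15")
    target := common.HexToAddress("0x2c73620b223808297ea734d946813f0dd78eb8f7")
    value := big.NewInt(100)
    fee := big.NewInt(100)
    deadline := big.NewInt(1700000000)
    nonce := big.NewInt(1)
    calldata := common.Hex2Bytes("deadbeef")
    // same method name and argument order as in ProcessSavedEvents above
    data, err := l2MessengerABI.Pack("relayMessage", sender, target, value, fee, deadline, nonce, calldata)
    if err != nil {
        panic(err)
    }
    fmt.Printf("relayMessage calldata: %d bytes\n", len(data))
}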

58
bridge/l1/relayer_test.go Normal file
View File

@@ -0,0 +1,58 @@
package l1_test
import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/common/utils"
)
var TEST_CONFIG = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8570,
},
DbTestconfig: mock.DbTestconfig{
DbName: "testl1relayer_db",
DbPort: 5440,
DB_CONFIG: &database.DBConfig{
DriverName: utils.GetEnvWithDefault("TEST_DB_DRIVER", "postgres"),
DSN: utils.GetEnvWithDefault("TEST_DB_DSN", "postgres://postgres:123456@localhost:5440/testl1relayer_db?sslmode=disable"),
},
},
}
// TestCreateNewL1Relayer creates a new L1 relayer instance, starts it, and stops it
func TestCreateNewL1Relayer(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
l1docker := mock.NewTestL1Docker(t, TEST_CONFIG)
defer l1docker.Stop()
client, err := ethclient.Dial(l1docker.Endpoint())
assert.NoError(t, err)
dbImg := mock.GetDbDocker(t, TEST_CONFIG)
dbImg.Start()
defer dbImg.Stop()
db, err := database.NewOrmFactory(TEST_CONFIG.DB_CONFIG)
assert.NoError(t, err)
relayer, err := l1.NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
relayer.Start()
defer relayer.Stop()
}

180
bridge/l1/watcher.go Normal file
View File

@@ -0,0 +1,180 @@
package l1
import (
"context"
"math/big"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/orm"
)
const (
// keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
sentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
)
// Watcher will listen for smart contract events from Eth L1.
type Watcher struct {
ctx context.Context
client *ethclient.Client
db orm.Layer1MessageOrm
// The number of blocks to wait before considering a block confirmed
confirmations uint64
messengerAddress common.Address
messengerABI *abi.ABI
// The height up to which the watcher has retrieved event logs
processedMsgHeight uint64
stop chan bool
}
// NewWatcher returns a new instance of Watcher. The instance will not be fully prepared,
// and still needs to be finalized and run by calling `watcher.Start`.
func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, messengerABI *abi.ABI, db orm.Layer1MessageOrm) *Watcher {
savedHeight, err := db.GetLayer1LatestWatchedHeight()
if err != nil {
log.Warn("Failed to fetch height from db", "err", err)
savedHeight = 0
}
if savedHeight < int64(startHeight) {
savedHeight = int64(startHeight)
}
stop := make(chan bool)
return &Watcher{
ctx: ctx,
client: client,
db: db,
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: messengerABI,
processedMsgHeight: uint64(savedHeight),
stop: stop,
}
}
// Start the Watcher module.
func (r *Watcher) Start() {
go func() {
// trigger by timer
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
blockNumber, err := r.client.BlockNumber(r.ctx)
if err != nil {
log.Error("Failed to get block number", "err", err)
}
if err := r.fetchContractEvent(blockNumber); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
case <-r.stop:
return
}
}
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (r *Watcher) Stop() {
r.stop <- true
}
// fetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (r *Watcher) fetchContractEvent(blockHeight uint64) error {
fromBlock := int64(r.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(r.confirmations)
if toBlock < fromBlock {
return nil
}
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
r.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = common.HexToHash(sentMessageEventSignature)
logs, err := r.client.FilterLogs(r.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
return nil
}
log.Info("Received new L1 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
eventLogs, err := parseBridgeEventLogs(logs, r.messengerABI)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
err = r.db.SaveLayer1Messages(r.ctx, eventLogs)
if err == nil {
r.processedMsgHeight = uint64(toBlock)
}
return err
}
func parseBridgeEventLogs(logs []types.Log, messengerABI *abi.ABI) ([]*orm.Layer1Message, error) {
// We need to use the contract ABI to parse the event log.
// This can only be tested after our contracts are set up.
var parsedlogs []*orm.Layer1Message
for _, vLog := range logs {
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
err := messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Warn("Failed to unpack layer1 SentMessage event", "err", err)
return parsedlogs, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
parsedlogs = append(parsedlogs, &orm.Layer1Message{
Nonce: event.MessageNonce.Uint64(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer1Hash: vLog.TxHash.Hex(),
})
}
return parsedlogs, nil
}
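A quick way to sanity-check the sentMessageEventSignature constant used by the watcher's topic filter is to hash the event signature string. This standalone sketch (not part of this diff) should print the same value, prefixed with 0x, if the constant is correct.
package main

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
    // keccak256 of the SentMessage event signature string
    sig := crypto.Keccak256Hash([]byte("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"))
    fmt.Println(sig.Hex()) // should match sentMessageEventSignature in watcher.go, with a 0x prefix
}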

93
bridge/l2/backend.go Normal file
View File

@@ -0,0 +1,93 @@
package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/database"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
)
// Backend manages the resources and services of the L2 backend.
// It monitors events on layer 2 and relays transactions to layer 1.
type Backend struct {
cfg *config.L2Config
l2Watcher *WatcherClient
relayer *Layer2Relayer
orm database.OrmFactory
}
// New returns a new instance of Backend.
func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*Backend, error) {
client, err := ethclient.Dial(cfg.Endpoint)
if err != nil {
return nil, err
}
l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
if err != nil {
log.Warn("new L2MessengerABI failed", "err", err)
return nil, err
}
skippedOpcodes := make(map[string]struct{}, len(cfg.SkippedOpcodes))
for _, op := range cfg.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
proofGenerationFreq := cfg.ProofGenerationFreq
if proofGenerationFreq == 0 {
log.Warn("receive 0 proof_generation_freq, change to 1")
proofGenerationFreq = 1
}
relayer, err := NewLayer2Relayer(ctx, client, proofGenerationFreq, skippedOpcodes, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, proofGenerationFreq, skippedOpcodes, cfg.L2MessengerAddress, l2MessengerABI, orm)
return &Backend{
cfg: cfg,
l2Watcher: l2Watcher,
relayer: relayer,
orm: orm,
}, nil
}
// Start the Backend module.
func (l2 *Backend) Start() error {
l2.l2Watcher.Start()
l2.relayer.Start()
return nil
}
// Stop the Backend module.
func (l2 *Backend) Stop() {
l2.l2Watcher.Stop()
l2.relayer.Stop()
}
// APIs collects the API modules.
func (l2 *Backend) APIs() []rpc.API {
return []rpc.API{
{
Namespace: "l2",
Version: "1.0",
Service: WatcherAPI(l2.l2Watcher),
Public: true,
},
}
}
// MockBlockResult for test case
func (l2 *Backend) MockBlockResult(blockResult *types.BlockResult) {
l2.l2Watcher.Send(blockResult)
}
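The sketch below (not part of this diff) shows one way the APIs returned above could be exposed over HTTP with go-ethereum's rpc server; the listen address is a placeholder and error handling is minimal.
package example

import (
    "net/http"

    "github.com/scroll-tech/go-ethereum/rpc"

    "scroll-tech/bridge/l2"
)

// serveL2APIs registers the backend's API modules and serves them over HTTP.
func serveL2APIs(backend *l2.Backend) error {
    server := rpc.NewServer()
    for _, api := range backend.APIs() {
        // exposes e.g. l2_replayBlockResultByHash under the "l2" namespace
        if err := server.RegisterName(api.Namespace, api.Service); err != nil {
            return err
        }
    }
    return http.ListenAndServe("127.0.0.1:8568", server) // placeholder address
}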

95
bridge/l2/checkTrace.go Normal file
View File

@@ -0,0 +1,95 @@
package l2
import (
"fmt"
"golang.org/x/sync/errgroup"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/core/vm"
"github.com/scroll-tech/go-ethereum/log"
)
func blockTraceIsValid(trace *types.BlockResult) bool {
if trace == nil {
log.Warn("block trace is empty")
return false
}
flag := true
for _, tx := range trace.ExecutionResults {
flag = structLogResIsValid(tx.StructLogs) && flag
}
return flag
}
func structLogResIsValid(txLogs []*types.StructLogRes) bool {
res := true
for i := 0; i < len(txLogs); i++ {
txLog := txLogs[i]
flag := true
switch vm.StringToOp(txLog.Op) {
case vm.CALL, vm.CALLCODE:
flag = codeIsValid(txLog, 2) && flag
flag = stateIsValid(txLog, 2) && flag
case vm.DELEGATECALL, vm.STATICCALL:
flag = codeIsValid(txLog, 2) && flag
case vm.CREATE, vm.CREATE2:
flag = stateIsValid(txLog, 1) && flag
case vm.SLOAD, vm.SSTORE, vm.SELFBALANCE:
flag = stateIsValid(txLog, 1) && flag
case vm.SELFDESTRUCT:
flag = stateIsValid(txLog, 2) && flag
case vm.EXTCODEHASH, vm.BALANCE:
flag = stateIsValid(txLog, 1) && flag
}
res = res && flag
}
return res
}
func codeIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
return false
} else if len(extraData.CodeList) < n {
log.Warn("code list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.CodeList))
return false
}
return true
}
func stateIsValid(txLog *types.StructLogRes, n int) bool {
extraData := txLog.ExtraData
if extraData == nil {
log.Warn("extraData is empty", "pc", txLog.Pc, "opcode", txLog.Op)
return false
} else if len(extraData.StateList) < n {
log.Warn("stateList list is too short", "opcode", txLog.Op, "expect length", n, "actual length", len(extraData.StateList))
return false
}
return true
}
// TraceHasUnsupportedOpcodes checks whether the trace contains unsupported opcodes
func TraceHasUnsupportedOpcodes(opcodes map[string]struct{}, trace *types.BlockResult) bool {
if trace == nil {
return false
}
eg := errgroup.Group{}
for _, res := range trace.ExecutionResults {
res := res
eg.Go(func() error {
for _, lg := range res.StructLogs {
if _, ok := opcodes[lg.Op]; ok {
return fmt.Errorf("unsupported opcode: %s", lg.Op)
}
}
return nil
})
}
err := eg.Wait()
return err != nil
}
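As a usage sketch (not part of this diff), the helper below builds the skipped-opcode set the same way backend.go does and runs the check; the opcode names a caller would pass are configuration-dependent.
package example

import (
    "github.com/scroll-tech/go-ethereum/core/types"

    "scroll-tech/bridge/l2"
)

// traceNeedsSkip reports whether the trace touches any opcode from skippedList.
func traceNeedsSkip(trace *types.BlockResult, skippedList []string) bool {
    skipped := make(map[string]struct{}, len(skippedList))
    for _, op := range skippedList {
        skipped[op] = struct{}{}
    }
    // a nil trace returns false by design (see TraceHasUnsupportedOpcodes above)
    return l2.TraceHasUnsupportedOpcodes(skipped, trace)
}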

522
bridge/l2/relayer.go Normal file
View File

@@ -0,0 +1,522 @@
package l2
import (
"context"
"math/big"
"strconv"
"time"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/orm"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/config"
"scroll-tech/bridge/sender"
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
//
// Actions are triggered by a new head from the layer 1 geth node.
// @todo It's better to be triggered by the watcher.
type Layer2Relayer struct {
ctx context.Context
client *ethclient.Client
sender *sender.Sender
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
db database.OrmFactory
cfg *config.RelayerConfig
l1MessengerABI *abi.ABI
l1RollupABI *abi.ABI
// a list of processing messages, indexed by layer2 hash
processingMessage map[string]string
// a list of processing blocks, indexed by block height
processingBlock map[string]uint64
// a list of processing proofs, indexed by block height
processingProof map[string]uint64
// channel used to communicate with transaction sender
confirmationCh <-chan *sender.Confirmation
stopCh chan struct{}
}
// NewLayer2Relayer returns a new instance of Layer2Relayer
func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, proofGenFreq uint64, skippedOpcodes map[string]struct{}, l2ConfirmNum int64, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
l1MessengerABI, err := bridge_abi.L1MessengerMetaData.GetAbi()
if err != nil {
log.Error("Get L1MessengerABI failed", "err", err)
return nil, err
}
l1RollupABI, err := bridge_abi.RollupMetaData.GetAbi()
if err != nil {
log.Error("Get RollupABI failed", "err", err)
return nil, err
}
prv, err := crypto.HexToECDSA(cfg.PrivateKey)
if err != nil {
log.Error("Failed to import private key from config file")
return nil, err
}
// @todo use different sender for relayer, block commit and proof finalize
sender, err := sender.NewSender(ctx, cfg.SenderConfig, prv)
if err != nil {
log.Error("Failed to create sender", "err", err)
return nil, err
}
return &Layer2Relayer{
ctx: ctx,
client: ethClient,
sender: sender,
db: db,
l1MessengerABI: l1MessengerABI,
l1RollupABI: l1RollupABI,
cfg: cfg,
proofGenerationFreq: proofGenFreq,
skippedOpcodes: skippedOpcodes,
processingMessage: map[string]string{},
processingBlock: map[string]uint64{},
processingProof: map[string]uint64{},
stopCh: make(chan struct{}),
confirmationCh: sender.ConfirmChan(),
}, nil
}
// ProcessSavedEvents relays saved, unprocessed cross-domain messages to the destination chain
func (r *Layer2Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2UnprocessedMessages()
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
if len(msgs) == 0 {
return
}
msg := msgs[0]
// @todo add support to relay multiple messages
latestFinalizeHeight, err := r.db.GetLatestFinalizedBlock()
if err != nil {
log.Error("GetLatestFinalizedBlock failed", "err", err)
return
}
if latestFinalizeHeight < msg.Height {
// log.Warn("corresponding block not finalized", "status", status)
return
}
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockNumber: big.NewInt(int64(msg.Height)),
MerkleProof: make([]byte, 0),
}
sender := common.HexToAddress(msg.Sender)
target := common.HexToAddress(msg.Target)
value, ok := big.NewInt(0).SetString(msg.Value, 10)
if !ok {
// @todo maybe panic?
log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
// TODO: need to skip this message by changing its status to MsgError
}
fee, _ := big.NewInt(0).SetString(msg.Fee, 10)
deadline := big.NewInt(int64(msg.Deadline))
msgNonce := big.NewInt(int64(msg.Nonce))
calldata := common.Hex2Bytes(msg.Calldata)
data, err := r.l1MessengerABI.Pack("relayMessageWithProof", sender, target, value, fee, deadline, msgNonce, calldata, proof)
if err != nil {
log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
// TODO: need to skip this message by changing its status to MsgError
return
}
hash, err := r.sender.SendTransaction(msg.Layer2Hash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil {
log.Error("Failed to send relayMessageWithProof tx to L1", "err", err)
return
}
log.Info("relayMessageWithProof to layer1", "layer2hash", msg.Layer2Hash, "txhash", hash.String())
// save status in db
// @todo handle db error
err = r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.Layer2Hash, hash.String(), orm.MsgSubmitted)
if err != nil {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "layer2hash", msg.Layer2Hash, "err", err)
}
r.processingMessage[msg.Layer2Hash] = msg.Layer2Hash
}
// ProcessPendingBlocks submits block data to the rollup contract on layer 1
func (r *Layer2Relayer) ProcessPendingBlocks() {
// blocks are sorted by height in increasing order
blocksInDB, err := r.db.GetPendingBlocks()
if err != nil {
log.Error("Failed to fetch pending L2 blocks", "err", err)
return
}
if len(blocksInDB) == 0 {
return
}
height := blocksInDB[0]
// @todo add support to relay multiple blocks
// will fetch missing block result from l2geth
trace, err := r.getOrFetchBlockResultByHeight(height)
if err != nil {
log.Error("getOrFetchBlockResultByHeight failed",
"height", height,
"err", err,
)
return
}
if trace == nil {
return
}
parentHash, err := r.getOrFetchBlockHashByHeight(height - 1)
if err != nil {
log.Error("getOrFetchBlockHashByHeight for parent block failed",
"parent height", height-1,
"err", err,
)
return
}
if parentHash == nil {
log.Error("parent hash is empty",
"height", height,
"err", err,
)
return
}
header := bridge_abi.IZKRollupBlockHeader{
BlockHash: trace.BlockTrace.Hash,
ParentHash: *parentHash,
BaseFee: trace.BlockTrace.BaseFee.ToInt(),
StateRoot: trace.StorageTrace.RootAfter,
BlockHeight: trace.BlockTrace.Number.ToInt().Uint64(),
GasUsed: 0,
Timestamp: trace.BlockTrace.Time,
ExtraData: make([]byte, 0),
}
txns := make([]bridge_abi.IZKRollupLayer2Transaction, len(trace.BlockTrace.Transactions))
for i, tx := range trace.BlockTrace.Transactions {
txns[i] = bridge_abi.IZKRollupLayer2Transaction{
Caller: tx.From,
Nonce: tx.Nonce,
Gas: tx.Gas,
GasPrice: tx.GasPrice.ToInt(),
Value: tx.Value.ToInt(),
Data: common.Hex2Bytes(tx.Data),
}
if tx.To != nil {
txns[i].Target = *tx.To
}
header.GasUsed += trace.ExecutionResults[i].Gas
}
data, err := r.l1RollupABI.Pack("commitBlock", header, txns)
if err != nil {
log.Error("Failed to pack commitBlock", "height", height, "err", err)
return
}
hash, err := r.sender.SendTransaction(strconv.FormatUint(height, 10), &r.cfg.RollupContractAddress, big.NewInt(0), data)
if err != nil {
log.Error("Failed to send commitBlock tx to layer1 ", "height", height, "err", err)
return
}
log.Info("commitBlock in layer1", "height", height, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateRollupTxHashAndStatus(r.ctx, height, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateRollupTxHashAndStatus failed", "height", height, "err", err)
}
r.processingBlock[strconv.FormatUint(height, 10)] = height
}
// ProcessCommittedBlocks submits proofs to the rollup contract on layer 1
func (r *Layer2Relayer) ProcessCommittedBlocks() {
// blocks are sorted by height in increasing order
blocksInDB, err := r.db.GetCommittedBlocks()
if err != nil {
log.Error("Failed to fetch committed L2 blocks", "err", err)
return
}
if len(blocksInDB) == 0 {
return
}
height := blocksInDB[0]
// @todo add support to relay multiple blocks
status, err := r.db.GetBlockStatusByNumber(height)
if err != nil {
log.Error("GetBlockStatusByNumber failed", "height", height, "err", err)
return
}
switch status {
case orm.BlockUnassigned, orm.BlockAssigned:
// The proof for this block is not ready yet.
return
case orm.BlockProved:
// It's an intermediate state. The roller manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case orm.BlockFailed, orm.BlockSkipped:
if err = r.db.UpdateRollupStatus(r.ctx, height, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "height", height, "err", err)
}
case orm.BlockVerified:
log.Info("Start to roll up zk proof", "height", height)
success := false
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to FinalizationSkipped", "height", height)
if err = r.db.UpdateRollupStatus(r.ctx, height, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "height", height, "err", err)
}
}
}()
proofBuffer, instanceBuffer, err := r.db.GetVerifiedProofAndInstanceByNumber(height)
if err != nil {
log.Warn("fetch proof by height failed", "height", height, "err", err)
return
}
if proofBuffer == nil || instanceBuffer == nil {
log.Warn("proof or instance not ready", "height", height)
return
}
if len(proofBuffer)%32 != 0 {
log.Error("proof buffer has wrong length", "height", height, "length", len(proofBuffer))
return
}
if len(instanceBuffer)%32 != 0 {
log.Warn("instance buffer has wrong length", "height", height, "length", len(instanceBuffer))
return
}
proof := bufferToUint256Le(proofBuffer)
instance := bufferToUint256Le(instanceBuffer)
// it must be in the db
hash, err := r.db.GetHashByNumber(height)
if err != nil {
log.Warn("fetch missing block result by height failed", "height", height, "err", err)
}
if hash == nil {
// only happen when trace validate failed
return
}
data, err := r.l1RollupABI.Pack("finalizeBlockWithProof", hash, proof, instance)
if err != nil {
log.Error("Pack finalizeBlockWithProof failed", "err", err)
return
}
txHash, err := r.sender.SendTransaction(strconv.FormatUint(height, 10), &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash = &txHash
if err != nil {
log.Error("finalizeBlockWithProof in layer1 failed",
"height", height,
"err", err,
)
return
}
log.Info("finalizeBlockWithProof in layer1", "height", height, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndStatus(r.ctx, height, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndStatus failed", "height", height, "err", err)
}
success = true
r.processingProof[strconv.FormatUint(height, 10)] = height
default:
log.Error("encounter unreachable case in ProcessCommittedBlocks",
"block_status", status,
)
}
}
// Start the relayer process
func (r *Layer2Relayer) Start() {
go func() {
// trigger by timer
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
r.ProcessSavedEvents()
r.ProcessPendingBlocks()
r.ProcessCommittedBlocks()
case confirmation := <-r.confirmationCh:
r.handleConfirmation(confirmation)
case <-r.stopCh:
return
}
}
}()
}
// Stop the relayer module, for a graceful shutdown.
func (r *Layer2Relayer) Stop() {
close(r.stopCh)
}
func (r *Layer2Relayer) getOrFetchBlockHashByHeight(height uint64) (*common.Hash, error) {
hash, err := r.db.GetHashByNumber(height)
if err != nil {
block, err := r.client.BlockByNumber(r.ctx, big.NewInt(int64(height)))
if err != nil {
return nil, err
}
x := block.Hash()
return &x, err
}
return hash, nil
}
func (r *Layer2Relayer) getOrFetchBlockResultByHeight(height uint64) (*types.BlockResult, error) {
tracesInDB, err := r.db.GetBlockResults(map[string]interface{}{"number": height})
if err != nil {
log.Warn("GetBlockResults failed", "height", height, "err", err)
return nil, err
}
if len(tracesInDB) == 0 {
return r.fetchMissingBlockResultByHeight(height)
}
return tracesInDB[0], nil
}
func (r *Layer2Relayer) fetchMissingBlockResultByHeight(height uint64) (*types.BlockResult, error) {
header, err := r.client.HeaderByNumber(r.ctx, big.NewInt(int64(height)))
if err != nil {
return nil, err
}
trace, err := r.client.GetBlockResultByHash(r.ctx, header.Hash())
if err != nil {
return nil, err
}
if blockTraceIsValid(trace) {
// skip verify for unsupported block
skip := false
if height%r.proofGenerationFreq != 0 {
log.Info("skip proof generation", "block", height)
skip = true
} else if TraceHasUnsupportedOpcodes(r.skippedOpcodes, trace) {
log.Info("block has unsupported opcodes, skip proof generation", "block", height)
skip = true
}
if skip {
if err = r.db.InsertBlockResultsWithStatus(r.ctx, []*types.BlockResult{trace}, orm.BlockSkipped); err != nil {
log.Error("failed to store missing blockResult", "err", err)
}
} else {
if err = r.db.InsertBlockResultsWithStatus(r.ctx, []*types.BlockResult{trace}, orm.BlockUnassigned); err != nil {
log.Error("failed to store missing blockResult", "err", err)
}
}
return trace, nil
}
return nil, nil
}
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
if layer2Hash, ok := r.processingMessage[confirmation.ID]; ok {
transactionType = "MessageRelay"
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, layer2Hash, confirmation.TxHash.String(), orm.MsgConfirmed)
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "err", err)
}
delete(r.processingMessage, confirmation.ID)
}
// check whether it is block commitment transaction
if blockHeight, ok := r.processingBlock[confirmation.ID]; ok {
transactionType = "BlockCommitment"
// @todo handle db error
err := r.db.UpdateRollupTxHashAndStatus(r.ctx, blockHeight, confirmation.TxHash.String(), orm.RollupCommitted)
if err != nil {
log.Warn("UpdateRollupTxHashAndStatus failed", "err", err)
}
delete(r.processingBlock, confirmation.ID)
}
// check whether it is proof finalization transaction
if blockHeight, ok := r.processingProof[confirmation.ID]; ok {
transactionType = "ProofFinalization"
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndStatus(r.ctx, blockHeight, confirmation.TxHash.String(), orm.RollupFinalized)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndStatus failed", "err", err)
}
delete(r.processingProof, confirmation.ID)
// try to delete block trace
err = r.db.DeleteTraceByNumber(blockHeight)
if err != nil {
log.Warn("DeleteTraceByNumber failed", "err", err)
}
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
//nolint:unused
func bufferToUint256Be(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
buffer256[i] = big.NewInt(0)
for j := 0; j < 32; j++ {
buffer256[i] = buffer256[i].Lsh(buffer256[i], 8)
buffer256[i] = buffer256[i].Add(buffer256[i], big.NewInt(int64(buffer[i*32+j])))
}
}
return buffer256
}
func bufferToUint256Le(buffer []byte) []*big.Int {
buffer256 := make([]*big.Int, len(buffer)/32)
for i := 0; i < len(buffer)/32; i++ {
v := big.NewInt(0)
shft := big.NewInt(1)
for j := 0; j < 32; j++ {
v = new(big.Int).Add(v, new(big.Int).Mul(shft, big.NewInt(int64(buffer[i*32+j]))))
shft = new(big.Int).Mul(shft, big.NewInt(256))
}
buffer256[i] = v
}
return buffer256
}
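A small in-package sketch (hypothetical, not part of this diff, and assumed to live in bridge/l2) that makes the byte-order difference between the two helpers above concrete:
// exampleBufferConversion shows the two byte orders on a 32-byte buffer whose
// first byte is 1 and whose remaining bytes are 0.
func exampleBufferConversion() {
    buf := make([]byte, 32)
    buf[0] = 1
    le := bufferToUint256Le(buf) // little-endian: le[0] is 1 (byte 0 is the least significant byte)
    be := bufferToUint256Be(buf) // big-endian: be[0] is 2^248 (byte 0 is the most significant byte)
    _, _ = le, be
}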

234
bridge/l2/relayer_test.go Normal file
View File

@@ -0,0 +1,234 @@
package l2_test
import (
"context"
"encoding/json"
"os"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/mock"
)
var (
templateLayer2Message = []*orm.Layer2Message{
{
Nonce: 1,
Height: 1,
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
Value: "100",
Fee: "100",
GasLimit: 11529940,
Deadline: uint64(time.Now().Unix()),
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
Calldata: "testdata",
Layer2Hash: "hash0",
},
}
l1Docker docker.ImgInstance
l2Docker docker.ImgInstance
dbDocker docker.ImgInstance
)
func setupEnv(t *testing.T) {
l1Docker = mock.NewTestL1Docker(t, TEST_CONFIG)
l2Docker = mock.NewTestL2Docker(t, TEST_CONFIG)
dbDocker = mock.GetDbDocker(t, TEST_CONFIG)
}
func TestRelayerFunction(t *testing.T) {
// Setup
setupEnv(t)
t.Run("TestCreateNewRelayer", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
db, err := database.NewOrmFactory(TEST_CONFIG.DB_CONFIG)
assert.NoError(t, err)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
relayer.Start()
defer relayer.Stop()
})
t.Run("TestL2RelayerProcessSaveEvents", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
err = db.SaveLayer2Messages(context.Background(), templateLayer2Message)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 3,
Status: orm.RollupFinalized,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
},
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupFinalized)
assert.NoError(t, err)
relayer.ProcessSavedEvents()
msg, err := db.GetLayer2MessageByNonce(templateLayer2Message[0].Nonce)
assert.NoError(t, err)
assert.Equal(t, orm.MsgSubmitted, msg.Status)
})
t.Run("TestL2RelayerProcessPendingBlocks", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
// this block result has block number 0x4; it needs to be changed to match the test case
// In this test scenario, the db will store two blocks with heights 0x4 and 0x3
var results []*types.BlockResult
templateBlockResult, err := os.ReadFile("../../common/testdata/blockResult_relayer_parent.json")
assert.NoError(t, err)
blockResult := &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
results = append(results, blockResult)
templateBlockResult, err = os.ReadFile("../../common/testdata/blockResult_relayer.json")
assert.NoError(t, err)
blockResult = &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
results = append(results, blockResult)
err = db.InsertBlockResultsWithStatus(context.Background(), results, orm.BlockUnassigned)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 4,
Status: 1,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
},
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupPending)
assert.NoError(t, err)
relayer.ProcessPendingBlocks()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(uint64(blocks[0].Number))
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
})
t.Run("TestL2RelayerProcessCommittedBlocks", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1Docker.Endpoint()
client, err := ethclient.Dial(l2Docker.Endpoint())
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
relayer, err := l2.NewLayer2Relayer(context.Background(), client, cfg.L2Config.ProofGenerationFreq, skippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
templateBlockResult, err := os.ReadFile("../../common/testdata/blockResult_relayer.json")
assert.NoError(t, err)
blockResult := &types.BlockResult{}
err = json.Unmarshal(templateBlockResult, blockResult)
assert.NoError(t, err)
err = db.InsertBlockResultsWithStatus(context.Background(), []*types.BlockResult{blockResult}, orm.BlockVerified)
assert.NoError(t, err)
blocks := []*orm.RollupResult{
{
Number: 4,
Status: 1,
RollupTxHash: "Rollup Test Hash",
FinalizeTxHash: "Finalized Hash",
},
}
err = db.InsertPendingBlocks(context.Background(), []uint64{uint64(blocks[0].Number)})
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), uint64(blocks[0].Number), orm.RollupCommitted)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tStateProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByNumber(context.Background(), uint64(blocks[0].Number), tProof, tStateProof, 100)
assert.NoError(t, err)
relayer.ProcessCommittedBlocks()
status, err := db.GetRollupStatus(uint64(blocks[0].Number))
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
})
// Teardown
t.Cleanup(func() {
assert.NoError(t, l1Docker.Stop())
assert.NoError(t, l2Docker.Stop())
assert.NoError(t, dbDocker.Stop())
})
}

265
bridge/l2/watcher.go Normal file
View File

@@ -0,0 +1,265 @@
package l2
import (
"context"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/orm"
)
const (
// BufferSize for the BlockResult channel
BufferSize = 16
// BlockResultCacheSize for the latest handled blockresults in memory.
BlockResultCacheSize = 64
// keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
sentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
)
// WatcherClient provides APIs that allow others to subscribe to various events from l2geth
type WatcherClient struct {
ctx context.Context
event.Feed
*ethclient.Client
orm database.OrmFactory
confirmations uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
messengerAddress common.Address
messengerABI *abi.ABI
// The height up to which the watcher has retrieved event logs
processedMsgHeight uint64
stopped uint64
stopCh chan struct{}
}
// NewL2WatcherClient takes an l2geth client and returns a WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, proofGenFreq uint64, skippedOpcodes map[string]struct{}, messengerAddress common.Address, messengerABI *abi.ABI, orm database.OrmFactory) *WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
}
return &WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
proofGenerationFreq: proofGenFreq,
skippedOpcodes: skippedOpcodes,
messengerAddress: messengerAddress,
messengerABI: messengerABI,
stopCh: make(chan struct{}),
stopped: 0,
}
}
// Start the Listening process
func (w *WatcherClient) Start() {
go func() {
if w.orm == nil {
panic("must run L2 watcher with DB")
}
// trigger by timer
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// get current height
number, err := w.BlockNumber(w.ctx)
if err != nil {
log.Error("Failed to get_BlockNumber", "err", err)
continue
}
if err = w.tryFetchRunningMissingBlocks(w.ctx, number); err != nil {
log.Error("Failed to fetchRunningMissingBlocks", "err", err)
}
// @todo handle error
err = w.fetchContractEvent(number)
if err != nil {
log.Error("Failed to fetchContractEvent", "err", err)
}
case <-w.stopCh:
return
}
}
}()
}
// Stop the Watcher module, for a graceful shutdown.
func (w *WatcherClient) Stop() {
w.stopCh <- struct{}{}
}
// try to fetch missing blocks if the DB is behind the chain
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, backTrackFrom uint64) error {
// Get the newest block in the DB; there must be blocks in the DB at this point.
// Don't use the "block_result" table's "content" column (BlockTrace.Number),
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetBlockResultsLatestHeight()
if err != nil {
return fmt.Errorf("Failed to GetBlockResults in DB: %v", err)
}
backTrackTo := uint64(0)
if heightInDB > 0 {
backTrackTo = uint64(heightInDB)
}
// start backtracking
heights := []uint64{}
toSkipped := []*types.BlockResult{}
toUnassigned := []*types.BlockResult{}
var (
header *types.Header
trace *types.BlockResult
)
for number := backTrackFrom; number > backTrackTo; number-- {
header, err = w.HeaderByNumber(ctx, big.NewInt(int64(number)))
if err != nil {
return fmt.Errorf("Failed to get HeaderByNumber: %v. number: %v", err, number)
}
trace, err = w.GetBlockResultByHash(ctx, header.Hash())
if err != nil {
return fmt.Errorf("Failed to GetBlockResultByHash: %v. number: %v", err, number)
}
log.Info("Retrieved block result", "height", header.Number, "hash", header.Hash())
heights = append(heights, number)
if number%w.proofGenerationFreq != 0 {
toSkipped = append(toSkipped, trace)
} else if TraceHasUnsupportedOpcodes(w.skippedOpcodes, trace) {
toSkipped = append(toSkipped, trace)
} else {
toUnassigned = append(toUnassigned, trace)
}
}
if len(heights) > 0 {
if err = w.orm.InsertPendingBlocks(ctx, heights); err != nil {
return fmt.Errorf("Failed to batch insert PendingBlocks: %v", err)
}
}
if len(toSkipped) > 0 {
if err = w.orm.InsertBlockResultsWithStatus(ctx, toSkipped, orm.BlockSkipped); err != nil {
return fmt.Errorf("failed to batch insert PendingBlocks: %v", err)
}
}
if len(toUnassigned) > 0 {
if err = w.orm.InsertBlockResultsWithStatus(ctx, toUnassigned, orm.BlockUnassigned); err != nil {
return fmt.Errorf("failed to batch insert PendingBlocks: %v", err)
}
}
return nil
}
// fetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(w.confirmations)
if toBlock < fromBlock {
return nil
}
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = common.HexToHash(sentMessageEventSignature)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
return nil
}
log.Info("Received new L2 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
eventLogs, err := parseBridgeEventLogs(logs, w.messengerABI)
if err != nil {
log.Error("Failed to parse emitted event log", "err", err)
return err
}
err = w.orm.SaveLayer2Messages(w.ctx, eventLogs)
if err == nil {
w.processedMsgHeight = uint64(toBlock)
}
return err
}
func parseBridgeEventLogs(logs []types.Log, messengerABI *abi.ABI) ([]*orm.Layer2Message, error) {
// We need to use the contract ABI to parse the event log.
// This can only be tested after our contracts are set up.
var parsedlogs []*orm.Layer2Message
for _, vLog := range logs {
event := struct {
Target common.Address
Sender common.Address
Value *big.Int // uint256
Fee *big.Int // uint256
Deadline *big.Int // uint256
Message []byte
MessageNonce *big.Int // uint256
GasLimit *big.Int // uint256
}{}
err := messengerABI.UnpackIntoInterface(&event, "SentMessage", vLog.Data)
if err != nil {
log.Error("Failed to unpack layer2 SentMessage event", "err", err)
return parsedlogs, err
}
// target is in topics[1]
event.Target = common.HexToAddress(vLog.Topics[1].String())
parsedlogs = append(parsedlogs, &orm.Layer2Message{
Nonce: event.MessageNonce.Uint64(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
Fee: event.Fee.String(),
GasLimit: event.GasLimit.Uint64(),
Deadline: event.Deadline.Uint64(),
Target: event.Target.String(),
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
}
return parsedlogs, nil
}

33
bridge/l2/watcher_api.go Normal file
View File

@@ -0,0 +1,33 @@
package l2
import (
"errors"
"github.com/scroll-tech/go-ethereum/rpc"
)
// WatcherAPI watcher api service
type WatcherAPI interface {
ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error)
}
// ReplayBlockResultByHash is a temporary interface for easy testing.
func (r *WatcherClient) ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error) {
orm := r.orm
params := make(map[string]interface{})
if number, ok := blockNrOrHash.Number(); ok {
params["number"] = int64(number)
}
if hash, ok := blockNrOrHash.Hash(); ok {
params["hash"] = hash.String()
}
if len(params) == 0 {
return false, errors.New("empty params")
}
trace, err := orm.GetBlockResults(params)
if err != nil {
return false, err
}
r.Send(&trace[0])
return true, nil
}

319
bridge/l2/watcher_test.go Normal file
View File

@@ -0,0 +1,319 @@
package l2_test
import (
"context"
"crypto/ecdsa"
"encoding/json"
"math/big"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
bridge_abi "scroll-tech/bridge/abi"
"scroll-tech/bridge/l2"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/database"
db_config "scroll-tech/database"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/mock_bridge"
)
const TEST_BUFFER = 500
var TEST_CONFIG = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8571,
},
L2GethTestConfig: mock.L2GethTestConfig{
HPort: 0,
WPort: 8567,
},
DbTestconfig: mock.DbTestconfig{
DbName: "testwatcher_db",
DbPort: 5438,
DB_CONFIG: &db_config.DBConfig{
DriverName: utils.GetEnvWithDefault("TEST_DB_DRIVER", "postgres"),
DSN: utils.GetEnvWithDefault("TEST_DB_DSN", "postgres://postgres:123456@localhost:5438/testwatcher_db?sslmode=disable"),
},
},
}
var (
// previousHeight store previous chain height
previousHeight uint64
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
l2Backend *l2.Backend
)
func setenv(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
l1gethImg = mock.NewTestL1Docker(t, TEST_CONFIG)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
l2Backend, l2gethImg, dbImg = mock.L2gethDocker(t, cfg, TEST_CONFIG)
}
func TestWatcherFunction(t *testing.T) {
setenv(t)
t.Run("TestL2Backend", func(t *testing.T) {
err := l2Backend.Start()
assert.NoError(t, err)
l2Backend.Stop()
})
t.Run("TestCreateNewWatcherAndStop", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
messengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
assert.NoError(t, err)
l2db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
skippedOpcodes := make(map[string]struct{}, len(cfg.L2Config.SkippedOpcodes))
for _, op := range cfg.L2Config.SkippedOpcodes {
skippedOpcodes[op] = struct{}{}
}
proofGenerationFreq := cfg.L2Config.ProofGenerationFreq
if proofGenerationFreq == 0 {
proofGenerationFreq = 1
}
rc := l2.NewL2WatcherClient(context.Background(), client, cfg.L2Config.Confirmations, proofGenerationFreq, skippedOpcodes, cfg.L2Config.L2MessengerAddress, messengerABI, l2db)
rc.Start()
// Create several transactions and commit to block
numTransactions := 3
for i := 0; i < numTransactions; i++ {
tx := mock.SendTxToL2Client(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
// wait for mining
_, err = bind.WaitMined(context.Background(), client, tx)
assert.NoError(t, err)
}
<-time.After(10 * time.Second)
blockNum, err := client.BlockNumber(context.Background())
assert.NoError(t, err)
assert.GreaterOrEqual(t, blockNum, uint64(numTransactions))
rc.Stop()
l2db.Close()
})
t.Run("TestMonitorBridgeContract", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
t.Log("confirmations:", cfg.L2Config.Confirmations)
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
previousHeight, err = client.BlockNumber(context.Background())
assert.NoError(t, err)
auth := prepareAuth(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridge(auth, client)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), client, tx)
assert.NoError(t, err)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
rc := prepareRelayerClient(client, db, address)
rc.Start()
// Call mock_bridge instance sendMessage to trigger emit events
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, err := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, err)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
//extra block mined
addr = common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nonceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nonceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), client, tx)
if err != nil || receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
// wait for the watcher to process the new events
<-time.After(6 * time.Second)
var latestHeight uint64
latestHeight, err = client.BlockNumber(context.Background())
assert.NoError(t, err)
t.Log("Latest height is", latestHeight)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2UnprocessedMessages()
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
rc.Stop()
db.Close()
})
t.Run("TestFetchMultipleSentMessageInOneBlock", func(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
mock.ClearDB(t, TEST_CONFIG.DB_CONFIG)
previousHeight, err := client.BlockNumber(context.Background()) // shadow the global previousHeight
assert.NoError(t, err)
auth := prepareAuth(t, client, cfg.L2Config.RelayerConfig.PrivateKey)
_, trx, instance, err := mock_bridge.DeployMockBridge(auth, client)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), client, trx)
assert.NoError(t, err)
db := mock.PrepareDB(t, TEST_CONFIG.DB_CONFIG)
rc := prepareRelayerClient(client, db, address)
rc.Start()
// Call sendMessage on the mock_bridge instance several times to emit multiple events
numTransactions := 4
var tx *types.Transaction
for i := 0; i < numTransactions; i++ {
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nonceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nonceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
}
receipt, err := bind.WaitMined(context.Background(), client, tx)
if err != nil || receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
// extra block mined
addr := common.HexToAddress("0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63")
nonce, nonceErr := client.PendingNonceAt(context.Background(), addr)
assert.NoError(t, nonceErr)
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), client, tx)
if err != nil || receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
// wait for the watcher to process the new events
<-time.After(6 * time.Second)
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2UnprocessedMessages()
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
rc.Stop()
db.Close()
})
// Teardown
t.Cleanup(func() {
assert.NoError(t, l1gethImg.Stop())
assert.NoError(t, l2gethImg.Stop())
assert.NoError(t, dbImg.Stop())
})
}
func TestTraceHasUnsupportedOpcodes(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
delegateTrace, err := os.ReadFile("../../common/testdata/blockResult_delegate.json")
assert.NoError(t, err)
trace := &types.BlockResult{}
assert.NoError(t, json.Unmarshal(delegateTrace, &trace))
unsupportedOpcodes := make(map[string]struct{})
for _, val := range cfg.L2Config.SkippedOpcodes {
unsupportedOpcodes[val] = struct{}{}
}
// only decoding the delegate trace and building the skipped-opcode set are exercised here
}
func prepareRelayerClient(client *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *l2.WatcherClient {
messengerABI, _ := bridge_abi.L1MessengerMetaData.GetAbi()
return l2.NewL2WatcherClient(context.Background(), client, 0, 1, map[string]struct{}{}, contractAddr, messengerABI, db)
}
func prepareAuth(t *testing.T, client *ethclient.Client, private string) *bind.TransactOpts {
privateKey, err := crypto.HexToECDSA(private)
assert.NoError(t, err)
publicKey := privateKey.Public()
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
assert.True(t, ok)
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
assert.NoError(t, err)
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(53077))
assert.NoError(t, err)
auth.Nonce = big.NewInt(int64(nonce))
auth.Value = big.NewInt(0) // in wei
auth.GasLimit = uint64(30000000) // in units
auth.GasPrice, err = client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
return auth
}

bridge/mock/mock.go
@@ -0,0 +1,251 @@
package mock
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/binary"
"encoding/json"
"io"
"math/big"
"net"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/gorilla/websocket"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/common/message"
"scroll-tech/coordinator"
coordinator_config "scroll-tech/coordinator/config"
bridge_config "scroll-tech/bridge/config"
"scroll-tech/bridge/l2"
"scroll-tech/common/docker"
docker_db "scroll-tech/database/docker"
)
// PerformHandshake performs the roller handshake over an existing websocket connection to the roller manager.
func PerformHandshake(t *testing.T, c *websocket.Conn) {
// Try to perform handshake
pk, sk := generateKeyPair()
authMsg := &message.AuthMessage{
Identity: message.Identity{
Name: "testRoller",
Timestamp: time.Now().UnixNano(),
PublicKey: common.Bytes2Hex(pk),
},
Signature: "",
}
hash, err := authMsg.Identity.Hash()
assert.NoError(t, err)
sig, err := secp256k1.Sign(hash, sk)
assert.NoError(t, err)
authMsg.Signature = common.Bytes2Hex(sig)
b, err := json.Marshal(authMsg)
assert.NoError(t, err)
msg := &message.Msg{
Type: message.Register,
Payload: b,
}
b, err = json.Marshal(msg)
assert.NoError(t, err)
assert.NoError(t, c.WriteMessage(websocket.BinaryMessage, b))
}
func generateKeyPair() (pubkey, privkey []byte) {
key, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)
if err != nil {
panic(err)
}
pubkey = elliptic.Marshal(secp256k1.S256(), key.X, key.Y)
privkey = make([]byte, 32)
blob := key.D.Bytes()
copy(privkey[32-len(blob):], blob)
return pubkey, privkey
}
// SetupMockVerifier sets up a mocked verifier for a test case.
func SetupMockVerifier(t *testing.T, verifierEndpoint string) {
err := os.RemoveAll(verifierEndpoint)
assert.NoError(t, err)
l, err := net.Listen("unix", verifierEndpoint)
assert.NoError(t, err)
go func() {
conn, err := l.Accept()
assert.NoError(t, err)
// Simply read all incoming messages and send a true boolean straight back.
for {
// Read the length prefix (assumed to be an 8-byte little-endian value, matching the Uint64 decode below)
buf := make([]byte, 8)
_, err = io.ReadFull(conn, buf)
assert.NoError(t, err)
// Read message
msgLength := binary.LittleEndian.Uint64(buf)
buf = make([]byte, msgLength)
_, err = io.ReadFull(conn, buf)
assert.NoError(t, err)
// Return boolean
buf = []byte{1}
_, err = conn.Write(buf)
assert.NoError(t, err)
}
}()
}
// L1GethTestConfig holds the HTTP and WebSocket ports of the l1geth docker container
type L1GethTestConfig struct {
HPort int
WPort int
}
// L2GethTestConfig holds the HTTP and WebSocket ports of the l2geth docker container
type L2GethTestConfig struct {
HPort int
WPort int
}
// DbTestconfig is the test config of database docker
type DbTestconfig struct {
DbName string
DbPort int
DB_CONFIG *database.DBConfig
}
// TestConfig is the config for test
type TestConfig struct {
L1GethTestConfig
L2GethTestConfig
DbTestconfig
}
// NewTestL1Docker starts and returns l1geth docker
func NewTestL1Docker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_geth := docker.NewImgGeth(t, "scroll_l1geth", "", "", tcfg.L1GethTestConfig.HPort, tcfg.L1GethTestConfig.WPort)
assert.NoError(t, img_geth.Start())
return img_geth
}
// NewTestL2Docker starts and returns l2geth docker
func NewTestL2Docker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_geth := docker.NewImgGeth(t, "scroll_l2geth", "", "", tcfg.L2GethTestConfig.HPort, tcfg.L2GethTestConfig.WPort)
assert.NoError(t, img_geth.Start())
return img_geth
}
// GetDbDocker starts and returns database docker
func GetDbDocker(t *testing.T, tcfg *TestConfig) docker.ImgInstance {
img_db := docker_db.NewImgDB(t, "postgres", "123456", tcfg.DbName, tcfg.DbPort)
assert.NoError(t, img_db.Start())
return img_db
}
// L2gethDocker returns a mock l2geth client backed by docker containers for tests
func L2gethDocker(t *testing.T, cfg *bridge_config.Config, tcfg *TestConfig) (*l2.Backend, docker.ImgInstance, docker.ImgInstance) {
// initialize l2geth docker image
img_geth := NewTestL2Docker(t, tcfg)
cfg.L2Config.Endpoint = img_geth.Endpoint()
// initialize db docker image
img_db := GetDbDocker(t, tcfg)
db, err := database.NewOrmFactory(&database.DBConfig{
DriverName: "postgres",
DSN: img_db.Endpoint(),
})
assert.NoError(t, err)
client, err := l2.New(context.Background(), cfg.L2Config, db)
assert.NoError(t, err)
return client, img_geth, img_db
}
// SetupRollerManager returns a coordinator.Manager for the test case
func SetupRollerManager(t *testing.T, cfg *coordinator_config.Config, orm database.OrmFactory) *coordinator.Manager {
// Set up the mock verifier and create the roller manager.
ctx := context.Background()
SetupMockVerifier(t, cfg.RollerManagerConfig.VerifierEndpoint)
rollerManager, err := coordinator.New(ctx, cfg.RollerManagerConfig, orm)
assert.NoError(t, err)
// Start rollermanager modules.
err = rollerManager.Start()
assert.NoError(t, err)
return rollerManager
}
// ClearDB clears db
func ClearDB(t *testing.T, db_cfg *database.DBConfig) {
factory, err := database.NewOrmFactory(db_cfg)
assert.NoError(t, err)
db := factory.GetDB()
version0 := int64(0)
err = migrate.Rollback(db.DB, &version0)
assert.NoError(t, err)
err = migrate.Migrate(db.DB)
assert.NoError(t, err)
err = db.DB.Close()
assert.NoError(t, err)
}
// PrepareDB returns a DB handle for the test case
func PrepareDB(t *testing.T, db_cfg *database.DBConfig) database.OrmFactory {
db, err := database.NewOrmFactory(db_cfg)
assert.NoError(t, err)
return db
}
// SendTxToL2Client sends a default transaction via the l2geth client
func SendTxToL2Client(t *testing.T, client *ethclient.Client, private string) *types.Transaction {
privateKey, err := crypto.HexToECDSA(private)
assert.NoError(t, err)
publicKey := privateKey.Public()
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
assert.True(t, ok)
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
assert.NoError(t, err)
value := big.NewInt(1000000000) // in wei
gasLimit := uint64(30000000) // in units
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
tx := types.NewTransaction(nonce, toAddress, value, gasLimit, gasPrice, nil)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), privateKey)
assert.NoError(t, err)
assert.NoError(t, client.SendTransaction(context.Background(), signedTx))
return signedTx
}
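For context, SetupMockVerifier above speaks a very small protocol over a unix socket: a little-endian length prefix, the payload, and a single 1 byte in response. The sketch below shows what a client of that mock could look like; it is illustrative only, and the socket path, the payload, and the 8-byte width of the length prefix are assumptions rather than a documented verifier API.

package main

import (
    "encoding/binary"
    "io"
    "log"
    "net"
)

// callMockVerifier sends one length-prefixed payload to the mock verifier
// and reads back its single-byte answer (1 = "verified").
func callMockVerifier(socketPath string, payload []byte) (bool, error) {
    conn, err := net.Dial("unix", socketPath)
    if err != nil {
        return false, err
    }
    defer conn.Close()

    // Length prefix: assumed to be 8 bytes, little-endian, to match the
    // binary.LittleEndian.Uint64 decode in SetupMockVerifier.
    prefix := make([]byte, 8)
    binary.LittleEndian.PutUint64(prefix, uint64(len(payload)))
    if _, err := conn.Write(prefix); err != nil {
        return false, err
    }
    if _, err := conn.Write(payload); err != nil {
        return false, err
    }

    // The mock always replies with exactly one byte.
    resp := make([]byte, 1)
    if _, err := io.ReadFull(conn, resp); err != nil {
        return false, err
    }
    return resp[0] == 1, nil
}

func main() {
    // Hypothetical socket path; in the tests this value comes from the
    // roller manager config's VerifierEndpoint.
    ok, err := callMockVerifier("/tmp/mock_verifier.sock", []byte("proof-bytes"))
    if err != nil {
        log.Fatal(err)
    }
    log.Println("verified:", ok)
}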

@@ -0,0 +1,42 @@
//SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract Mock_Bridge {
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
/// @notice Message nonce, used to prevent replay attacks.
uint256 public messageNonce;
function sendMessage(
address _to,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
// @todo compute fee
uint256 _fee = 0;
uint256 _nonce = messageNonce;
require(msg.value >= _fee, "cannot pay fee");
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
unchecked {
messageNonce = _nonce + 1;
}
}
}
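A note on how the watcher picks these logs up: the tests filter for the SentMessage event emitted above, whose topic is the Keccak-256 hash of the event's canonical signature. The short Go sketch below derives that topic directly from the Solidity declaration; it is a standalone illustration, not code from the repository (in the tests themselves the watcher is handed the parsed messenger ABI and the deployed contract address for this purpose).

package main

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
    // Canonical signature of the SentMessage event declared in Mock_Bridge;
    // "indexed" does not change the signature, only the parameter types matter.
    sig := "SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"

    // topics[0] of every SentMessage log is the Keccak-256 hash of this signature.
    topic := crypto.Keccak256Hash([]byte(sig))
    fmt.Println("SentMessage topic:", topic.Hex())
}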

@@ -1,8 +1,5 @@
#!/bin/bash
# generates go bindings from contracts, to paste into abi/bridge_abi.go
# compile artifacts in the /contracts folder with `forge build` first
# Only run if executed from the repository root.
if [[ ! -d "cmd" ]]
then
@@ -17,20 +14,16 @@ else
mkdir -p contracts
fi
abi_name=("IL1GatewayRouter" "IL2GatewayRouter" "IL1ScrollMessenger" "IL2ScrollMessenger" "IScrollChain" "L1MessageQueue")
pkg_name=("l1_gateway" "l2_gateway" "l1_messenger" "l2_messenger" "scrollchain" "l1_message_queue")
gen_name=("L1GatewayRouter" "L2GatewayRouter" "L1ScrollMessenger" "L2ScrollMessenger" "IScrollChain" "L1MessageQueue")
abi_name=("IL1GatewayRouter" "IL2GatewayRouter" "IL1ScrollMessenger" "IL2ScrollMessenger" "ZKRollup")
pkg_name=("l1_gateway" "l2_gateway" "l1_messenger" "l2_messenger" "rollup")
gen_name=("L1GatewayRouter" "L2GatewayRouter" "L1ScrollMessenger" "L2ScrollMessenger" "ZKRollup")
for i in "${!abi_name[@]}"; do
mkdir -p tmp
abi="tmp/${abi_name[$i]}.json"
cat ../scroll-contracts/artifacts/src/${abi_name[$i]}.sol/${abi_name[$i]}.json | jq '.abi' > $abi
pkg="${pkg_name[$i]}_abi"
abi="bridge/abi/${abi_name[$i]}.json"
pkg="${pkg_name[$i]}"
out="contracts/${pkg}/${gen_name[$i]}.go"
echo "generating ${out} from ${abi}"
mkdir -p contracts/$pkg
abigen --abi=$abi --pkg=$pkg --out=$out
awk '{sub("github.com/ethereum","github.com/scroll-tech")}1' $out > temp && mv temp $out
done
rm -rf tmp
done

bridge/sender/sender.go
@@ -0,0 +1,402 @@
package sender
import (
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/bridge/config"
)
const (
// AccessListTxType type for AccessListTx
AccessListTxType = "AccessListTx"
// DynamicFeeTxType type for DynamicFeeTx
DynamicFeeTxType = "DynamicFeeTx"
// LegacyTxType type for LegacyTx
LegacyTxType = "LegacyTx"
)
// DefaultSenderConfig is the default sender config
var DefaultSenderConfig = config.SenderConfig{
Endpoint: "",
EscalateBlocks: 3,
EscalateMultipleNum: 11,
EscalateMultipleDen: 10,
MaxGasPrice: 1000_000_000_000, // this is 1000 gwei
TxType: AccessListTxType,
}
// Confirmation struct used to indicate transaction confirmation details
type Confirmation struct {
ID string
IsSuccessful bool
TxHash common.Hash
}
// FeeData fee struct used to estimate gas price
type FeeData struct {
gasFeeCap *big.Int
gasTipCap *big.Int
gasPrice *big.Int
gasLimit uint64
}
// PendingTransaction is a submitted but not yet confirmed transaction
type PendingTransaction struct {
submitAt uint64
id string
feeData *FeeData
tx *types.Transaction
}
// Sender is a transaction sender used to send transactions to l1/l2 geth
type Sender struct {
config *config.SenderConfig
client *ethclient.Client // The client to retrieve on chain data or send transaction.
chainID *big.Int // The chain id of the endpoint
ctx context.Context
mu sync.Mutex
auth *bind.TransactOpts
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs sync.Map // Mapping from nonce to pending transaction
confirmCh chan *Confirmation
sendTxErrCh chan error
stopCh chan struct{}
}
// NewSender returns a new instance of the transaction sender.
// Confirmed transactions are reported on the channel returned by ConfirmChan.
func NewSender(ctx context.Context, config *config.SenderConfig, prv *ecdsa.PrivateKey) (*Sender, error) {
if config == nil {
config = &DefaultSenderConfig
}
client, err := ethclient.Dial(config.Endpoint)
if err != nil {
return nil, err
}
// get chainID from client
chainID, err := client.ChainID(ctx)
if err != nil {
return nil, err
}
auth, err := bind.NewKeyedTransactorWithChainID(prv, chainID)
if err != nil {
log.Error("failed to create account", "chainID", chainID.String(), "err", err)
return nil, err
}
log.Info("sender", "chainID", chainID.String(), "address", auth.From.String())
// set nonce
nonce, err := client.PendingNonceAt(ctx, auth.From)
if err != nil {
log.Error("failed to get pending nonce", "address", auth.From.String(), "err", err)
return nil, err
}
auth.Nonce = big.NewInt(int64(nonce))
// get header by number
header, err := client.HeaderByNumber(ctx, nil)
if err != nil {
return nil, err
}
sender := &Sender{
ctx: ctx,
config: config,
client: client,
chainID: chainID,
auth: auth,
sendTxErrCh: make(chan error, 4),
confirmCh: make(chan *Confirmation, 128),
baseFeePerGas: header.BaseFee.Uint64(),
pendingTxs: sync.Map{},
stopCh: make(chan struct{}),
}
go sender.loop()
return sender, nil
}
// Stop stops the sender module.
func (s *Sender) Stop() {
close(s.stopCh)
log.Info("Transaction sender stopped")
}
// ConfirmChan returns the channel on which transaction confirmations are delivered
func (s *Sender) ConfirmChan() <-chan *Confirmation {
return s.confirmCh
}
func (s *Sender) getFeeData(msg ethereum.CallMsg) (*FeeData, error) {
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
if err != nil {
return nil, err
}
gasLimit = gasLimit * 15 / 10 // add 50% extra gas to avoid out-of-gas errors
// @todo change it when Scroll enable EIP1559
if s.config.TxType != DynamicFeeTxType {
// estimate gas price
var gasPrice *big.Int
gasPrice, err = s.client.SuggestGasPrice(s.ctx)
if err != nil {
return nil, err
}
return &FeeData{
gasPrice: gasPrice,
gasLimit: gasLimit,
}, nil
}
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
if err != nil {
return nil, err
}
// Make sure feeCap is bigger than txpool's gas price. 1000000000 is l2geth's default pool.gas value.
baseFee := atomic.LoadUint64(&s.baseFeePerGas)
maxFeePerGas := math.BigMax(big.NewInt(int64(baseFee)), big.NewInt(1000000000))
return &FeeData{
gasFeeCap: math.BigMax(maxFeePerGas, gasTipCap),
gasTipCap: math.BigMin(maxFeePerGas, gasTipCap),
gasLimit: gasLimit,
}, nil
}
// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte) (hash common.Hash, err error) {
if _, ok := s.pendingTxs.Load(ID); ok {
return common.Hash{}, fmt.Errorf("duplicate tx ID: %s", ID)
}
var (
// estimate gas limit
call = ethereum.CallMsg{
From: s.auth.From,
To: target,
Gas: 0,
GasPrice: nil,
GasFeeCap: nil,
GasTipCap: nil,
Value: value,
Data: data,
AccessList: make(types.AccessList, 0),
}
feeData *FeeData
tx *types.Transaction
)
// estimate gas fee
if feeData, err = s.getFeeData(call); err != nil {
return
}
if tx, err = s.createAndSendTx(feeData, target, value, data); err == nil {
// add pending transaction to queue
pending := &PendingTransaction{
tx: tx,
id: ID,
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
s.pendingTxs.Store(ID, pending)
return tx.Hash(), nil
}
return
}
func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value *big.Int, data []byte) (tx *types.Transaction, err error) {
s.mu.Lock()
defer s.mu.Unlock()
var (
nonce = s.auth.Nonce.Uint64()
txData types.TxData
)
// lock here to avoid blocking when calling `SuggestGasPrice`
switch s.config.TxType {
case LegacyTxType:
// for ganache mock node
txData = &types.LegacyTx{
Nonce: nonce,
GasPrice: feeData.gasPrice,
Gas: feeData.gasLimit,
To: target,
Value: new(big.Int).Set(value),
Data: common.CopyBytes(data),
V: new(big.Int),
R: new(big.Int),
S: new(big.Int),
}
case AccessListTxType:
txData = &types.AccessListTx{
ChainID: s.chainID,
Nonce: nonce,
GasPrice: feeData.gasPrice,
Gas: feeData.gasLimit,
To: target,
Value: new(big.Int).Set(value),
Data: common.CopyBytes(data),
AccessList: make(types.AccessList, 0),
V: new(big.Int),
R: new(big.Int),
S: new(big.Int),
}
default:
txData = &types.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: common.CopyBytes(data),
Gas: feeData.gasLimit,
AccessList: make(types.AccessList, 0),
Value: new(big.Int).Set(value),
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
V: new(big.Int),
R: new(big.Int),
S: new(big.Int),
}
}
// sign and send
tx, err = s.auth.Signer(s.auth.From, types.NewTx(txData))
if err != nil {
log.Error("failed to sign tx", "err", err)
return
}
if err = s.client.SendTransaction(s.ctx, tx); err != nil {
log.Error("failed to send tx", "tx hash", tx.Hash().String(), "err", err)
s.sendTxErrCh <- err
return
}
// update nonce
s.auth.Nonce = big.NewInt(int64(nonce + 1))
return
}
func (s *Sender) resubmitTransaction(feeData *FeeData, tx *types.Transaction) (*types.Transaction, error) {
// @todo move query out of lock scope
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
switch s.config.TxType {
case LegacyTxType, AccessListTxType: // `LegacyTxType` is for the ganache mock node
gasPrice := escalateMultipleNum.Mul(escalateMultipleNum, big.NewInt(feeData.gasPrice.Int64()))
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
if gasPrice.Cmp(feeData.gasPrice) < 0 {
gasPrice = feeData.gasPrice
}
if gasPrice.Cmp(maxGasPrice) > 0 {
gasPrice = maxGasPrice
}
feeData.gasPrice = gasPrice
default:
gasTipCap := big.NewInt(feeData.gasTipCap.Int64())
gasTipCap = gasTipCap.Mul(gasTipCap, escalateMultipleNum)
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
gasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())
gasFeeCap = gasFeeCap.Mul(gasFeeCap, escalateMultipleNum)
gasFeeCap = gasFeeCap.Div(gasFeeCap, escalateMultipleDen)
if gasFeeCap.Cmp(feeData.gasFeeCap) < 0 {
gasFeeCap = feeData.gasFeeCap
}
if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
gasTipCap = feeData.gasTipCap
}
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
}
return s.createAndSendTx(feeData, tx.To(), tx.Value(), tx.Data())
}
// CheckPendingTransaction checks pending transactions, given the number of blocks to wait before confirmation.
func (s *Sender) CheckPendingTransaction(header *types.Header) {
number := header.Number.Uint64()
atomic.StoreUint64(&s.blockNumber, number)
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
s.pendingTxs.Range(func(key, value interface{}) bool {
pending := value.(*PendingTransaction)
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
if number >= receipt.BlockNumber.Uint64()+s.config.Confirmations {
s.pendingTxs.Delete(key)
// send confirm message
s.confirmCh <- &Confirmation{
ID: pending.id,
IsSuccessful: receipt.Status == types.ReceiptStatusSuccessful,
TxHash: pending.tx.Hash(),
}
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
tx, err := s.resubmitTransaction(pending.feeData, pending.tx)
if err != nil {
log.Error("failed to resubmit transaction, reset submitAt", "tx hash", pending.tx.Hash().String(), "err", err)
} else {
// flush submitAt
pending.tx = tx
pending.submitAt = number
}
}
return true
})
}
// loop is the main event loop
func (s *Sender) loop() {
t := time.Duration(s.config.CheckPendingTime) * time.Second
checkTick := time.NewTicker(t)
defer checkTick.Stop()
for {
select {
case <-checkTick.C:
header, err := s.client.HeaderByNumber(s.ctx, nil)
if err != nil {
log.Error("failed to get latest head", "err", err)
continue
}
s.CheckPendingTransaction(header)
case err := <-s.sendTxErrCh:
// re-sync the nonce from the pending pool
if strings.Contains(err.Error(), "nonce") {
if nonce, err := s.client.PendingNonceAt(s.ctx, s.auth.From); err != nil {
log.Error("failed to get pending nonce", "address", s.auth.From.String(), "err", err)
} else {
s.mu.Lock()
s.auth.Nonce = big.NewInt(int64(nonce))
s.mu.Unlock()
}
}
case <-s.stopCh:
return
}
}
}
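To make the resubmission arithmetic above concrete: each escalation multiplies the previous fee by EscalateMultipleNum/EscalateMultipleDen and clamps the result at MaxGasPrice. The sketch below replays that calculation for the legacy/access-list branch using the DefaultSenderConfig values (ratio 11/10, cap 1000 gwei); the 100 gwei starting price is only an example, not a value from the repository.

package main

import (
    "fmt"
    "math/big"
)

// escalate bumps a gas price by num/den and clamps it at maxPrice,
// mirroring the LegacyTxType/AccessListTxType branch of resubmitTransaction.
func escalate(prev, num, den, maxPrice *big.Int) *big.Int {
    next := new(big.Int).Mul(prev, num)
    next.Div(next, den)
    if next.Cmp(prev) < 0 {
        next.Set(prev) // never go below the previous price
    }
    if next.Cmp(maxPrice) > 0 {
        next.Set(maxPrice) // never exceed the configured cap
    }
    return next
}

func main() {
    gwei := big.NewInt(1_000_000_000)
    prev := new(big.Int).Mul(big.NewInt(100), gwei)      // example starting price: 100 gwei
    num, den := big.NewInt(11), big.NewInt(10)           // DefaultSenderConfig escalation ratio
    maxPrice := new(big.Int).Mul(big.NewInt(1000), gwei) // DefaultSenderConfig.MaxGasPrice

    fmt.Println(escalate(prev, num, den, maxPrice)) // 110000000000, i.e. 110 gwei
}

Unlike this sketch, the production code reuses the freshly allocated escalateMultipleNum as its accumulator, which is safe because the config values are re-read on every call.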

@@ -0,0 +1,119 @@
package sender_test
import (
"context"
"crypto/ecdsa"
"math/big"
"strconv"
"testing"
"time"
"golang.org/x/sync/errgroup"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock"
"scroll-tech/bridge/sender"
)
const TX_BATCH = 100
var (
TestConfig = &mock.TestConfig{
L1GethTestConfig: mock.L1GethTestConfig{
HPort: 0,
WPort: 8576,
},
}
l1gethImg docker.ImgInstance
private *ecdsa.PrivateKey
)
func setupEnv(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
prv, err := crypto.HexToECDSA(cfg.L2Config.RelayerConfig.PrivateKey)
assert.NoError(t, err)
private = prv
l1gethImg = mock.NewTestL1Docker(t, TestConfig)
}
func TestFunction(t *testing.T) {
// Setup
setupEnv(t)
t.Run("test Run sender", func(t *testing.T) {
// set config
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
// create newSender
newSender, err := sender.NewSender(context.Background(), cfg.L2Config.RelayerConfig.SenderConfig, private)
assert.NoError(t, err)
defer newSender.Stop()
// send transactions
idCache := cmap.New()
confirmCh := newSender.ConfirmChan()
var (
eg errgroup.Group
errCh = make(chan error, 1)
)
go func() {
for i := 0; i < TX_BATCH; i++ {
//toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
toAddr := common.BigToAddress(big.NewInt(int64(i + 1000)))
id := strconv.Itoa(i + 1000)
eg.Go(func() error {
txHash, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
if err != nil {
t.Error("failed to send tx", "err", err)
return err
}
t.Log("successful send a tx", "ID", id, "tx hash", txHash.String())
idCache.Set(id, struct{}{})
return nil
})
}
errCh <- eg.Wait()
}()
// avoid the default 10-minute test timeout causing a panic
after := time.After(60 * time.Second)
for {
select {
case cmsg := <-confirmCh:
t.Log("get confirmations of", "ID: ", cmsg.ID, "status: ", cmsg.IsSuccessful)
assert.Equal(t, true, cmsg.IsSuccessful)
_, exist := idCache.Pop(cmsg.ID)
assert.Equal(t, true, exist)
// Receive all confirmed txs.
if idCache.Count() == 0 {
return
}
case err := <-errCh:
if err != nil {
t.Errorf("failed to send tx, err: %v", err)
return
}
assert.NoError(t, err)
case <-after:
t.Logf("newSender test failed because timeout")
t.FailNow()
}
}
})
// Teardown
t.Cleanup(func() {
assert.NoError(t, l1gethImg.Stop())
})
}

@@ -1,7 +1,7 @@
# Source: https://github.com/golangci/golangci-lint/blob/master/.golangci.example.yml
# options for analysis running
run:
# default concurrency is the available CPU number
# default concurrency is a available CPU number
concurrency: 4
# timeout for analysis, e.g. 30s, 5m, default is 1m
@@ -75,10 +75,6 @@ linters-settings:
# report about shadowed variables
check-shadowing: true
gosec:
disable:
- G108
golint:
# minimal confidence for issues, default is 0.8
min-confidence: 0.8
@@ -104,12 +100,10 @@ linters-settings:
# minimal occurrences count to trigger, 3 by default
min-occurrences: 3
depguard:
rules:
main:
files:
- $all
deny:
- pkg: "github.com/davecgh/go-spew/spew"
list-type: blacklist
include-go-root: false
packages:
- github.com/davecgh/go-spew/spew
misspell:
# Correct spellings using locale preferences for US or UK.
# Default is to use a neutral variety of English.
@@ -185,13 +179,6 @@ linters:
- depguard
- gocyclo
- unparam
- exportloopref
- sqlclosecheck
- rowserrcheck
- durationcheck
- bidichk
- typecheck
- unused
enable-all: false
disable:
@@ -213,32 +200,41 @@ issues:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
# Exclude abi files in bridge-history-api
- path: backend_abi\.go
# Exclude known linters from partially hard-vendored code,
# which is impossible to exclude via "nolint" comments.
- path: internal/hmac/
text: "weak cryptographic primitive"
linters:
- errcheck
- gosec
- golint
# Exclude some staticcheck messages
- linters:
- staticcheck
text: "SA9003:"
- linters:
- golint
text: "package comment should be of the form"
- linters:
- golint
text: "don't use ALL_CAPS in Go names;"
- linters:
- golint
text: "don't use underscores in Go names;"
# Exclude lll issues for long lines with go:generate
- linters:
- lll
source: "^//go:generate "
text: "long-lines"
# Exclude gosec issues for G108: Profiling endpoint is automatically exposed
- linters:
- gosec
text: "G108"
- linters:
- wsl
text: "return statements should not be cuddled if block has more than two lines"
