Compare commits

1 Commit

Author: colinlyguo | SHA1: f4cac65ea5 | Message: tmp | Date: 2024-01-18 01:03:23 +08:00
685 changed files with 70835 additions and 41297 deletions

View File

@@ -32,9 +32,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
@@ -46,9 +46,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Test
run: |
make test
@@ -65,9 +65,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge-history-api/ -w .
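
The lint and goimports jobs above can be reproduced locally; a minimal sketch, assuming the module Makefile exposes the `lint` target the workflow calls:

```bash
# Hedged sketch: run the same checks as the workflow, from the module directory.
cd bridge-history-api
rm -rf "$HOME/.cache/golangci-lint"   # CI clears this cache before linting; do the same
make lint                             # golangci-lint via the module Makefile, as in the workflow
go install golang.org/x/tools/cmd/goimports@latest
goimports -local scroll-tech/bridge-history-api/ -w .
```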

View File

@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- name: check diff

View File

@@ -29,24 +29,24 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2025-08-18
toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: ". -> target"
# - name: Lint
# working-directory: 'common'
# run: |
# rm -rf $HOME/.cache/golangci-lint
# make lint
workspaces: "common/libzkp/impl -> target"
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
@@ -54,9 +54,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -79,15 +79,15 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.24'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
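
The `Install Solc` step above only downloads a single static binary via `supplypike/setup-bin` and puts it on PATH; a rough local equivalent on Linux, using either of the two versions that appear in the hunk:

```bash
# Hedged sketch: install the static solc release the workflow pins and expose it as `solc`.
SOLC_VERSION=0.8.24   # the other side of the hunk uses 0.8.16
curl -fsSL -o /tmp/solc \
  "https://github.com/ethereum/solidity/releases/download/v${SOLC_VERSION}/solc-static-linux"
chmod +x /tmp/solc
sudo mv /tmp/solc /usr/local/bin/solc
solc --version
```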

.github/workflows/contracts.yml (new file, 138 lines added)
View File

@@ -0,0 +1,138 @@
name: Contracts
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
defaults:
run:
working-directory: 'contracts'
jobs:
foundry:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
submodules: recursive
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly
- name: Setup LCOV
uses: hrishikesh-kadam/setup-lcov@v1
- name: Install Node.js 14
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
- name: Cache yarn dependencies
uses: actions/cache@v2
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('contracts/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Cache node_modules
id: npm_cache
uses: actions/cache@v2
with:
path: node_modules
key: node_modules-${{ hashFiles('contracts/yarn.lock') }}
- name: yarn install
# if: steps.npm_cache.outputs.cache-hit != 'true'
run: yarn install
- name: Compile with foundry
run: forge build
- name: Run foundry tests
run: forge test -vvv
- name: Run foundry coverage
run : forge coverage --report lcov
- name : Prune coverage
run : lcov --rc branch_coverage=1 --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*'
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
files: contracts/lcov.info.pruned
flags: contracts
hardhat:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
submodules: recursive
- name: Install Node.js 14
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
- name: Cache yarn dependencies
uses: actions/cache@v2
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('contracts/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Cache node_modules
id: npm_cache
uses: actions/cache@v2
with:
path: node_modules
key: node_modules-${{ hashFiles('contracts/yarn.lock') }}
- name: yarn install
# if: steps.npm_cache.outputs.cache-hit != 'true'
run: yarn install
- name: Compile with hardhat
run: npx hardhat compile
- name: Run hardhat tests
run: npx hardhat test
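
The coverage steps of the foundry job in this new workflow can also be run locally; a minimal sketch, assuming Foundry and lcov are installed and the commands are run from `contracts/` (the lcov flags are copied from the workflow above):

```bash
# Hedged sketch: produce and prune an lcov report the same way the foundry job does.
cd contracts
forge build
forge test -vvv
forge coverage --report lcov
lcov --rc branch_coverage=1 \
  --remove ./lcov.info -o ./lcov.info.pruned \
  'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*'
```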

View File

@@ -33,15 +33,15 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2025-08-18
toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
working-directory: 'coordinator'
run: |
@@ -54,9 +54,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -77,7 +77,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v4
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Build and push
@@ -95,15 +95,15 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.24'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
@@ -112,7 +112,7 @@ jobs:
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
make libzkp
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
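
The coordinator test step above builds the native verifier library first and then runs the Go tests with the `mock_verifier` build tag; a hedged local sketch using the targets named in the hunk:

```bash
# Hedged sketch: run the coordinator tests as the workflow does, with the mock verifier.
cd coordinator
make libzkp    # builds the native libzkp library; CI runs this before the tests
go test -v -race -gcflags="-l" -ldflags="-s=false" \
  -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
```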

View File

@@ -32,9 +32,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
working-directory: 'database'
run: |
@@ -47,9 +47,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -72,15 +72,15 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.24'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites

View File

@@ -1,41 +0,0 @@
name: Docker-coordinator-api-arm64
on:
workflow_dispatch:
inputs:
tag:
description: "tag of this image (suffix -arm64 is added automatically)"
required: true
type: string
jobs:
build-and-push-arm64-image:
runs-on: ubuntu-latest
strategy:
matrix:
arch:
- aarch64
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
run: |
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
docker buildx create --name multiarch --driver docker-container --use
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build docker image
uses: docker/build-push-action@v2
with:
platforms: linux/arm64
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: scrolltech/coordinator-api:${{inputs.tag}}-arm64

View File

@@ -5,18 +5,12 @@ on:
tags:
- v**
env:
AWS_REGION: us-west-2
jobs:
gas_oracle:
runs-on:
group: scroll-reth-runner-group
event_watcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
@@ -24,43 +18,45 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
- name: Build and push event_watcher docker
uses: docker/build-push-action@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: gas-oracle
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: gas-oracle
IMAGE_TAG: ${{ github.ref_name }}
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
push: true
tags: |
scrolltech/event-watcher:${{github.ref_name}}
scrolltech/event-watcher:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
gas_oracle:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/gas-oracle:${{github.ref_name}}
scrolltech/gas-oracle:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
rollup_relayer:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
@@ -68,263 +64,68 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: rollup-relayer
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: rollup-relayer
IMAGE_TAG: ${{ github.ref_name }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
blob_uploader:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: blob-uploader
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: blob-uploader
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/blob_uploader.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
rollup-db-cli:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: rollup-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: rollup-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/db_cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/rollup-relayer:${{github.ref_name}}
scrolltech/rollup-relayer:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-fetcher:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: bridgehistoryapi-fetcher
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: bridgehistoryapi-fetcher
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-fetcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
push: true
tags: |
scrolltech/bridgehistoryapi-fetcher:${{github.ref_name}}
scrolltech/bridgehistoryapi-fetcher:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-api:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: bridgehistoryapi-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: bridgehistoryapi-api
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
bridgehistoryapi-db-cli:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: bridgehistoryapi-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: bridgehistoryapi-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-api docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
push: true
tags: |
scrolltech/bridgehistoryapi-api:${{github.ref_name}}
scrolltech/bridgehistoryapi-api:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator-api:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
@@ -332,42 +133,22 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: coordinator-api
IMAGE_TAG: ${{ github.ref_name }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/coordinator-api:${{github.ref_name}}
scrolltech/coordinator-api:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator-cron:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
@@ -375,31 +156,14 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: coordinator-cron
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: coordinator-cron
IMAGE_TAG: ${{ github.ref_name }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/coordinator-cron:${{github.ref_name}}
scrolltech/coordinator-cron:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
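
Several jobs above repeat the same "check repo and create it if not exist" step; the `&& : ||` idiom simply falls through to `create-repository` when `describe-repositories` fails. A standalone sketch of that step, with the repository name as a placeholder:

```bash
#!/usr/bin/env bash
# Hedged sketch of the ensure-ECR-repository step used by the workflow.
set -euo pipefail
AWS_REGION="us-west-2"       # matches the workflow's AWS_REGION env
REPOSITORY="gas-oracle"      # placeholder; each job substitutes its own repository name

# If the repository already exists, do nothing; otherwise create it.
aws --region "$AWS_REGION" ecr describe-repositories \
      --repository-names "$REPOSITORY" >/dev/null 2>&1 \
  || aws --region "$AWS_REGION" ecr create-repository \
      --repository-name "$REPOSITORY"
```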

View File

@@ -22,15 +22,15 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.24'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
@@ -38,7 +38,6 @@ jobs:
make dev_docker
make -C rollup mock_abi
make -C common/bytecode all
make -C coordinator/internal/logic/libzkp build
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...

View File

@@ -4,116 +4,56 @@ on:
workflow_dispatch:
inputs:
GO_VERSION:
description: "Go version"
description: 'Go version'
required: true
type: choice
options:
- "1.20"
- "1.20.14"
- "1.21"
- "1.21.13"
- "1.22"
- "1.22.12"
- "1.23"
- "1.23.7"
default: "1.21"
type: string
default: '1.20'
RUST_VERSION:
description: "Rust toolchain version"
description: 'Rust toolchain version'
required: true
type: choice
options:
- 1.86.0
- nightly-2025-08-18
default: "nightly-2025-08-18"
type: string
default: 'nightly-2022-12-10'
PYTHON_VERSION:
description: "Python version"
description: 'Python version'
required: false
type: choice
options:
- "3.10"
default: "3.10"
type: string
default: '3.10'
CUDA_VERSION:
description: "Cuda version"
description: 'Cuda version'
required: false
type: choice
options:
- "11.7.1"
- "12.2.2"
- "12.9.1"
default: "12.9.1"
CARGO_CHEF_TAG:
description: "Cargo chef version"
required: true
default: "0.1.41"
type: choice
options:
- 0.1.41
- 0.1.71
BASE_IMAGE:
description: "which intermediate image you want to update"
required: true
default: "go-alpine-builder"
type: choice
options:
- cuda-go-rust-builder
- go-rust-builder
- go-alpine-builder
- rust-builder
- rust-alpine-builder
- go-rust-alpine-builder
- py-runner
type: string
default: '11.7.1'
defaults:
run:
working-directory: "build/dockerfiles/intermediate"
working-directory: 'build/dockerfiles/intermediate'
jobs:
build-and-publish-intermediate:
runs-on:
group: scroll-reth-runner-group
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: set tag env
run: |
if [ ${{github.event.inputs.BASE_IMAGE}} == "cuda-go-rust-builder" ]; then
echo "TAG=cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-rust-builder" ]; then
echo "TAG=go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-alpine-builder" ]; then
echo "TAG=${{ github.event.inputs.GO_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "rust-builder" ]; then
echo "TAG=${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "rust-alpine-builder" ]; then
echo "TAG=${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-rust-alpine-builder" ]; then
echo "TAG=go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "py-runner" ]; then
echo "TAG=${{ github.event.inputs.PYTHON_VERSION }}" >> $GITHUB_ENV
else
echo "no BASE_IMAGE match"
fi
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/${{ github.event.inputs.BASE_IMAGE }}.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/${{ github.event.inputs.BASE_IMAGE }}:${{ env.TAG }}
build-args: |
CUDA_VERSION=${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION=${{ github.event.inputs.GO_VERSION }}
RUST_VERSION=${{ github.event.inputs.RUST_VERSION }}
PYTHON_VERSION=${{ github.event.inputs.PYTHON_VERSION }}
CARGO_CHEF_TAG=${{ github.event.inputs.CARGO_CHEF_TAG }}
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build
run: |
make all
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Publish
run: |
make publish
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
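
The new `build-and-publish-intermediate` job delegates the multi-arch build to `docker/build-push-action@v5`; a rough local equivalent with `docker buildx`, run from the repository root and using the default `go-alpine-builder` input (image name, Dockerfile path, and build arg are taken from the workflow, so treat them as illustrative):

```bash
# Hedged sketch: roughly what the build-push-action step does for the default inputs.
BASE_IMAGE=go-alpine-builder
GO_VERSION=1.21
TAG="$GO_VERSION"            # the workflow's tag rule for go-alpine-builder

docker buildx create --use >/dev/null 2>&1 || true
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --file "build/dockerfiles/intermediate/${BASE_IMAGE}.Dockerfile" \
  --build-arg "GO_VERSION=${GO_VERSION}" \
  --tag "scrolltech/${BASE_IMAGE}:${TAG}" \
  --push \
  .
```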

.github/workflows/prover.yml (new file, 102 lines added)
View File

@@ -0,0 +1,102 @@
name: Prover
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
defaults:
run:
working-directory: 'prover'
jobs:
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Test
run: |
make prover
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
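
The `goimports-lint` job fails the build if either goimports or `go mod tidy` would modify the tree; the same gate looks roughly like this when run locally from `prover/`:

```bash
# Hedged sketch: mirror the workflow's "no diff after formatting" check.
cd prover
go install golang.org/x/tools/cmd/goimports@latest
goimports -local scroll-tech/prover/ -w .
go mod tidy

# Any resulting change means formatting or go.mod/go.sum was out of date.
if [ -n "$(git status --porcelain)" ]; then
  git status --porcelain
  echo "goimports or go mod tidy produced changes" >&2
  exit 1
fi
```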

View File

@@ -34,15 +34,15 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.24'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint
@@ -58,9 +58,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -83,15 +83,15 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.24'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Build prerequisites
@@ -105,7 +105,7 @@ jobs:
- name: Test rollup packages
working-directory: 'rollup'
run: |
make test
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
@@ -117,7 +117,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v4
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker

.gitignore (5 lines changed)
View File

@@ -4,8 +4,6 @@ assets/seed
# Built binaries
build/bin
verifier.test
core.test
coverage.txt
*.integration.txt
@@ -22,6 +20,3 @@ coverage.txt
# misc
sftp-config.json
*~
target
zkvm-prover/config.json

.gitmodules (15 lines changed)
View File

@@ -1,3 +1,12 @@
[submodule "scroll-contracts"]
path = scroll-contracts
url = https://github.com/scroll-tech/scroll-contracts.git
[submodule "l2geth"]
path = l2geth
url = git@github.com:scroll-tech/go-ethereum.git
[submodule "contracts/lib/ds-test"]
path = contracts/lib/ds-test
url = https://github.com/dapphub/ds-test
[submodule "contracts/lib/forge-std"]
path = contracts/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "contracts/lib/solmate"]
path = contracts/lib/solmate
url = https://github.com/rari-capital/solmate

View File

@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
## Contribute to Scroll
Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
## Issues and PRs

Cargo.lock (generated, 11049 lines changed)

File diff suppressed because it is too large.

View File

@@ -1,67 +0,0 @@
[workspace]
members = [
"crates/libzkp",
"crates/l2geth",
"crates/libzkp_c",
"crates/prover-bin",
]
resolver = "2"
[workspace.package]
authors = ["Scroll developers"]
edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.7.1"
[workspace.dependencies]
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "1", default-features = false }
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
serde_with = "3"
itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
eyre = "0.6"
once_cell = "1.20"
base64 = "0.22"
[patch.crates-io]
revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-context = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-database = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-handler = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-state = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2022-2024 Scroll
Copyright (c) 2022-2023 Scroll
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,47 +1,56 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.9.17
L2GETH_TAG=scroll-v5.1.6
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
update: ## Update dependencies
update:
go work sync
cd $(PWD)/bridge-history-api/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/rollup/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
lint: ## The code's format and security checks
lint: ## The code's format and security checks.
make -C rollup lint
make -C common lint
make -C coordinator lint
make -C database lint
make -C prover lint
make -C bridge-history-api lint
fmt: ## Format the code
fmt: ## format the code
go work sync
cd $(PWD)/bridge-history-api/ && go mod tidy
cd $(PWD)/common/ && go mod tidy
cd $(PWD)/coordinator/ && go mod tidy
cd $(PWD)/database/ && go mod tidy
cd $(PWD)/prover/ && go mod tidy
cd $(PWD)/rollup/ && go mod tidy
cd $(PWD)/tests/integration-test/ && go mod tidy
goimports -local scroll-tech/bridge-history-api/ -w .
goimports -local scroll-tech/common/ -w .
goimports -local scroll-tech/coordinator/ -w .
goimports -local scroll-tech/database/ -w .
goimports -local scroll-tech/rollup/ -w .
goimports -local scroll-tech/tests/integration-test/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/prover/ -w .
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/tests/integration-test/ -w .
dev_docker: ## Build docker images for development/testing usages
docker pull postgres
docker build -t scroll_l1geth --platform linux/amd64 ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth --platform linux/amd64 ./common/testcontainers/docker/l2geth/
dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app scroll_test_image
clean: ## Empty out the bin folder
@rm -rf build/bin
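
The `update` target above bumps the pinned `go-ethereum` dependency (the `L2GETH_TAG` at the top of the Makefile) in every Go module; the same bump for a single module looks like this, using the tag from the left-hand side of the hunk:

```bash
# Hedged sketch: pin one module to the go-ethereum tag used by the Makefile's update target.
L2GETH_TAG=scroll-v5.9.17
cd bridge-history-api
go get "github.com/scroll-tech/go-ethereum@${L2GETH_TAG}"
go mod tidy
```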

View File

@@ -1,6 +1,7 @@
# Scroll Monorepo
[![rollup](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
[![contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml)
[![bridge-history](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
[![coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
[![prover](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
@@ -16,9 +17,10 @@
├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./src">l2geth</a>: Scroll execution node
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./rollup">rollup</a>: Rollup-related services
├── <a href="https://github.com/scroll-tech/scroll-contracts.git">scroll-contracts</a>: solidity code for Scroll L1 bridge and rollup contracts and L2 bridge and pre-deployed contracts.
├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
└── <a href="./tests">tests</a>: Integration tests
</pre>
@@ -27,8 +29,8 @@
We welcome community contributions to this repository. Before you submit any issues or PRs, please read the [Code of Conduct](CODE_OF_CONDUCT.md) and the [Contribution Guideline](CONTRIBUTING.md).
## Prerequisites
+ Go 1.21
+ Rust (for version, see [rust-toolchain](./rust-toolchain))
+ Go 1.20
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Hardhat / Foundry
+ Docker
@@ -39,7 +41,9 @@ docker pull postgres
make dev_docker
```
## Unit Tests
## Testing Rollup & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
@@ -50,6 +54,45 @@ go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
### For Apple Silicon (M1/M2) Macs
To run tests on Apple Silicon Macs, build and execute the Docker image as outlined below:
#### Build a Docker Image for Testing
Use the following command to build a Docker image:
```bash
make build_test_docker
```
This command builds a Docker image named `scroll_test_image` using the Dockerfile found at `./build/dockerfiles/local_test.Dockerfile`.
#### Run Docker Image
After the image is built, run a Docker container from it:
```bash
make run_test_docker
```
This command runs a Docker container named `scroll_test_container` from the `scroll_test_image` image. The container uses the host network and has access to the Docker socket and the current directory.
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## Testing Contracts
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).
See [`contracts`](/contracts) for more details on the contracts.
## License
Scroll Monorepo is licensed under the [MIT](./LICENSE) license.

View File

@@ -37,6 +37,6 @@ reset-env:
go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset
bridgehistoryapi-docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile

View File

@@ -79,50 +79,3 @@ provides REST APIs. Please refer to the API details below.
// @Success 200
// @Router /api/txsbyhashes [post]
```
## Running bridge-history-api locally
1. Pull the latest Redis image:
```
docker pull redis:latest
```
2. Run the Redis container:
```
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
```
3. Pull the latest PostgreSQL image:
```
docker pull postgres:latest
```
4. Run the PostgreSQL container:
```
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
```
5. Run database migrations to initialize the tables:
```
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli migrate
```
6. Run bridgehistoryapi-fetcher:
```
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```
7. Run bridgehistoryapi-api:
```
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```
The endpoints provided in [./conf/config.json](./conf/config.json) are all public endpoints and have rate limits.
For production usage:
- For L1 endpoints, utilizing a service provider's free tier should suffice.
- For L2 endpoints, consider [running a Scroll L2geth node](https://docs.scroll.xyz/en/developers/guides/running-a-scroll-node) and using the exposed HTTP port.

File diff suppressed because one or more lines are too long

View File

@@ -1,13 +0,0 @@
package backendabi
import (
"testing"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
func TestEventSignatures(t *testing.T) {
assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), L1RevertBatchV0EventSig)
assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)")), L1RevertBatchV7EventSig)
}

View File

@@ -96,7 +96,7 @@ func action(ctx *cli.Context) error {
return nil
}
// Run bridge-history-backend api cmd instance.
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)

View File

@@ -26,27 +26,32 @@ func init() {
Name: "reset",
Usage: "Clean and reset database.",
Action: resetDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "status",
Usage: "Check migration status.",
Action: checkDBStatus,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "version",
Usage: "Display the current database version.",
Action: dbVersion,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: migrateDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: rollbackDB,
Flags: []cli.Flag{
&utils.ConfigFileFlag,
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",

View File

@@ -68,10 +68,7 @@ func action(ctx *cli.Context) error {
observability.Server(ctx, db)
l1MessageFetcher, err := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
if err != nil {
log.Crit("failed to create L1MessageFetcher", "err", err)
}
l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
go l1MessageFetcher.Start()
l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)
@@ -87,7 +84,7 @@ func action(ctx *cli.Context) error {
return nil
}
// Run bridge-history-backend fetcher cmd instance.
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)

View File

@@ -3,8 +3,8 @@
"confirmation": 0,
"endpoint": "https://rpc.ankr.com/eth",
"startHeight": 18306000,
"blockTime": 12,
"fetchLimit": 16,
"blockTime": 10,
"fetchLimit": 30,
"MessengerAddr": "0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367",
"ETHGatewayAddr": "0x7F2b8C31F88B6006c382775eea88297Ec1e3E905",
"WETHGatewayAddr": "0x7AC440cAe8EB6328de4fA621163a792c1EA9D4fE",
@@ -15,21 +15,15 @@
"USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
"LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
"PufferGatewayAddr": "0xA033Ff09f2da45f0e9ae495f525363722Df42b2a",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
"MessageQueueV2Addr": "0x0000000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4",
"GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
"WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
"BlobScanAPIEndpoint": "https://api.blobscan.com/blobs/"
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
},
"L2": {
"confirmation": 0,
"endpoint": "https://rpc.scroll.io",
"blockTime": 3,
"fetchLimit": 64,
"fetchLimit": 100,
"MessengerAddr": "0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC",
"ETHGatewayAddr": "0x6EA73e05AdC79974B931123675ea8F78FfdacDF0",
"WETHGatewayAddr": "0x7003E7B7186f0E6601203b99F7B8DECBfA391cf9",
@@ -40,10 +34,7 @@
"USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
"LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
"DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0xa1a12158bE6269D7580C63eC5E609Cdc0ddD82bC"
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

View File

@@ -1,127 +1,92 @@
module scroll-tech/bridge-history-api
go 1.22
toolchain go1.22.2
go 1.20
require (
github.com/gin-contrib/cors v1.5.0
github.com/gin-gonic/gin v1.9.1
github.com/go-redis/redis/v8 v8.11.5
github.com/google/uuid v1.4.0
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
github.com/stretchr/testify v1.8.4
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
golang.org/x/sync v0.5.0
gorm.io/gorm v1.25.5
)
// Hotfix for header hash incompatibility issue.
// PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
// CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b
require (
dario.cat/mergo v1.0.0 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.10.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.27 // indirect
github.com/consensys/gnark-crypto v0.16.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.15.5 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/gomega v1.27.1 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/scroll-tech/zktrie v0.7.1 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.15 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.33.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

View File

@@ -1,32 +1,15 @@
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0=
github.com/ClickHouse/ch-go v0.58.2/go.mod h1:Ap/0bEmiLa14gYjCiRkYGbXvbe8vwdrfTYWhsuQ99aw=
github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4=
github.com/ClickHouse/clickhouse-go/v2 v2.15.0/go.mod h1:kXt1SRq0PIRa6aKZD7TnFnY9PQKmc2b13sHtOYcK6cQ=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -41,10 +24,7 @@ github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
@@ -53,53 +33,33 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs=
github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible h1:xhVCHXq+P5LhT31+RuDuk0xXEbEnd50Fr37J1bGuyWg=
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v26.1.0+incompatible h1:W1G9MPNbskA6VZWL7b3ZljTh0pXI68FpINx0GKaOdaM=
github.com/docker/docker v26.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T/Lao=
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
@@ -107,19 +67,10 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
@@ -129,63 +80,44 @@ github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QX
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -194,28 +126,22 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -223,134 +149,90 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s=
github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg=
github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c h1:MnAdt80steCDli4SAD0J0spBGNY+gQvbdptNjWztHcw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c/go.mod h1:4HrFcoStbViFVy/9l/rvKl1XmizVAaPdgqI8v0U8hOc=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
@@ -360,25 +242,17 @@ github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZ
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw=
github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dzWP1Lu+A40W883dK/Mr3xyDSM/2MggS8GtHT0qgAnE=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
@@ -387,108 +261,69 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg=
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
modernc.org/libc v1.32.0 h1:yXatHTrACp3WaKNRCoZwUK7qj5V8ep1XyY0ka4oYcNc=
modernc.org/libc v1.32.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8=
modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

View File

@@ -6,14 +6,13 @@ import (
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/common/utils"
)
// FetcherConfig is the configuration of the Layer1 or Layer2 fetcher.
type FetcherConfig struct {
// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct {
Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"`
StartHeight uint64 `json:"startHeight"` // Can only be configured to the contract deployment height, since the message proof should be updated from the very beginning.
StartHeight uint64 `json:"startHeight"` // Can only be configured to the contract deployment height; otherwise, in the current implementation, the message proof could not be successfully updated.
BlockTime int64 `json:"blockTime"`
FetchLimit uint64 `json:"fetchLimit"`
MessengerAddr string `json:"MessengerAddr"`
@@ -24,21 +23,11 @@ type FetcherConfig struct {
DAIGatewayAddr string `json:"DAIGatewayAddr"`
USDCGatewayAddr string `json:"USDCGatewayAddr"`
LIDOGatewayAddr string `json:"LIDOGatewayAddr"`
PufferGatewayAddr string `json:"PufferGatewayAddr"`
ERC721GatewayAddr string `json:"ERC721GatewayAddr"`
ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
ScrollChainAddr string `json:"ScrollChainAddr"`
GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"`
MessageQueueV2Addr string `json:"MessageQueueV2Addr"`
BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
GasTokenGatewayAddr string `json:"GasTokenGatewayAddr"`
WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`
BeaconNodeAPIEndpoint string `json:"BeaconNodeAPIEndpoint"`
BlobScanAPIEndpoint string `json:"BlobScanAPIEndpoint"`
BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
AwsS3Endpoint string `json:"AwsS3Endpoint"`
}
// RedisConfig redis config
@@ -54,8 +43,8 @@ type RedisConfig struct {
// Config is the configuration of the bridge history backend
type Config struct {
L1 *FetcherConfig `json:"L1"`
L2 *FetcherConfig `json:"L2"`
L1 *LayerConfig `json:"L1"`
L2 *LayerConfig `json:"L2"`
DB *database.Config `json:"db"`
Redis *RedisConfig `json:"redis"`
}
@@ -73,11 +62,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_BRIDGE_HISTORY")
if err != nil {
return nil, err
}
return cfg, nil
}
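For orientation, a minimal Go sketch of building the fetcher config on the LayerConfig ("+") side of this hunk; every value below is an illustrative placeholder rather than a value from this repository, and real deployments load the struct from the JSON config file via NewConfig (on the FetcherConfig side the loaded values can additionally be overridden through environment variables prefixed with SCROLL_BRIDGE_HISTORY):

import "scroll-tech/bridge-history-api/internal/config"

// Illustrative placeholders only; not configuration from this repository.
l1Cfg := &config.LayerConfig{
	Confirmation:    6,                        // blocks to wait before trusting a log
	Endpoint:        "https://example-l1-rpc", // placeholder RPC endpoint
	StartHeight:     1000000,                  // contract deployment height
	BlockTime:       12,                       // polling interval in seconds
	FetchLimit:      100,                      // max blocks scanned per window
	MessengerAddr:   "0x0000000000000000000000000000000000000000", // placeholder address
	ScrollChainAddr: "0x0000000000000000000000000000000000000000", // placeholder address
}
_ = l1Cfg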

View File

@@ -8,17 +8,8 @@ import (
)
var (
// TxsByAddressCtl the TxsByAddressController instance
TxsByAddressCtl *TxsByAddressController
// TxsByHashesCtl the TxsByHashesController instance
TxsByHashesCtl *TxsByHashesController
// L2UnclaimedWithdrawalsByAddressCtl the L2UnclaimedWithdrawalsByAddressController instance
L2UnclaimedWithdrawalsByAddressCtl *L2UnclaimedWithdrawalsByAddressController
// L2WithdrawalsByAddressCtl the L2WithdrawalsByAddressController instance
L2WithdrawalsByAddressCtl *L2WithdrawalsByAddressController
// HistoryCtrler is the controller instance
HistoryCtrler *HistoryController
initControllerOnce sync.Once
)
@@ -26,9 +17,6 @@ var (
// InitController inits Controller with database
func InitController(db *gorm.DB, redis *redis.Client) {
initControllerOnce.Do(func() {
TxsByAddressCtl = NewTxsByAddressController(db, redis)
TxsByHashesCtl = NewTxsByHashesController(db, redis)
L2UnclaimedWithdrawalsByAddressCtl = NewL2UnclaimedWithdrawalsByAddressController(db, redis)
L2WithdrawalsByAddressCtl = NewL2WithdrawalsByAddressController(db, redis)
HistoryCtrler = NewHistoryController(db, redis)
})
}

View File

@@ -0,0 +1,94 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// HistoryController contains the bridge history query services
type HistoryController struct {
historyLogic *logic.HistoryLogic
}
// NewHistoryController returns a new HistoryController instance
func NewHistoryController(db *gorm.DB, redis *redis.Client) *HistoryController {
return &HistoryController{
historyLogic: logic.NewHistoryLogic(db, redis),
}
}
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
func (c *HistoryController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// GetL2WithdrawalsByAddress defines the http get method behavior
func (c *HistoryController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// GetTxsByAddress defines the http get method behavior
func (c *HistoryController) GetTxsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// PostQueryTxsByHashes defines the http post method behavior
func (c *HistoryController) PostQueryTxsByHashes(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
return
}
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}
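A hedged sketch of how the consolidated HistoryController might be wired into a gin router; only the handler names and InitController/HistoryCtrler come from the files above, while the controller package import path, route paths, and server setup are assumptions:

package main

import (
	"github.com/gin-gonic/gin"
	"github.com/go-redis/redis/v8"
	"gorm.io/gorm"

	// assumed import path for the controller package shown above
	"scroll-tech/bridge-history-api/internal/controller/api"
)

// setupRouter registers the four handlers of the consolidated controller.
// The route paths are placeholders, not taken from this repository.
func setupRouter(db *gorm.DB, redisClient *redis.Client) *gin.Engine {
	api.InitController(db, redisClient) // populates api.HistoryCtrler exactly once
	r := gin.Default()
	r.GET("/api/txs", api.HistoryCtrler.GetTxsByAddress)
	r.GET("/api/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
	r.GET("/api/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
	r.POST("/api/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
	return r
}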

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// L2UnclaimedWithdrawalsByAddressController the controller of GetL2UnclaimedWithdrawalsByAddress
type L2UnclaimedWithdrawalsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewL2UnclaimedWithdrawalsByAddressController creates a new L2UnclaimedWithdrawalsByAddressController
func NewL2UnclaimedWithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2UnclaimedWithdrawalsByAddressController {
return &L2UnclaimedWithdrawalsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
func (c *L2UnclaimedWithdrawalsByAddressController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// L2WithdrawalsByAddressController the controller of GetL2WithdrawalsByAddress
type L2WithdrawalsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewL2WithdrawalsByAddressController creates a new L2WithdrawalsByAddressController
func NewL2WithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2WithdrawalsByAddressController {
return &L2WithdrawalsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetL2WithdrawalsByAddress defines the http get method behavior
func (c *L2WithdrawalsByAddressController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// TxsByAddressController the controller of GetTxsByAddress
type TxsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewTxsByAddressController creates a new TxsByAddressController
func NewTxsByAddressController(db *gorm.DB, redisClient *redis.Client) *TxsByAddressController {
return &TxsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetTxsByAddress defines the http get method behavior
func (c *TxsByAddressController) GetTxsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// TxsByHashesController the controller of PostQueryTxsByHashes
type TxsByHashesController struct {
historyLogic *logic.HistoryLogic
}
// NewTxsByHashesController creates a new TxsByHashesController
func NewTxsByHashesController(db *gorm.DB, redisClient *redis.Client) *TxsByHashesController {
return &TxsByHashesController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// PostQueryTxsByHashes queries the txs by hashes
func (c *TxsByHashesController) PostQueryTxsByHashes(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
return
}
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -2,7 +2,6 @@ package fetcher
import (
"context"
"fmt"
"math/big"
"time"
@@ -11,7 +10,6 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/config"
@@ -22,7 +20,7 @@ import (
// L1MessageFetcher fetches cross message events from L1 and saves them to database.
type L1MessageFetcher struct {
ctx context.Context
cfg *config.FetcherConfig
cfg *config.LayerConfig
client *ethclient.Client
l1SyncHeight uint64
@@ -37,35 +35,13 @@ type L1MessageFetcher struct {
}
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
blobClient := blob_client.NewBlobClients()
if cfg.AwsS3Endpoint != "" {
blobClient.AddBlobClient(blob_client.NewAwsS3Client(cfg.AwsS3Endpoint))
}
if cfg.BeaconNodeAPIEndpoint != "" {
beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
if err != nil {
log.Warn("failed to create BeaconNodeClient", "err", err)
} else {
blobClient.AddBlobClient(beaconNodeClient)
}
}
if cfg.BlobScanAPIEndpoint != "" {
blobClient.AddBlobClient(blob_client.NewBlobScanClient(cfg.BlobScanAPIEndpoint))
}
if cfg.BlockNativeAPIEndpoint != "" {
blobClient.AddBlobClient(blob_client.NewBlockNativeClient(cfg.BlockNativeAPIEndpoint))
}
if blobClient.Size() == 0 {
return nil, fmt.Errorf("no blob client is configured")
}
func NewL1MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
c := &L1MessageFetcher{
ctx: ctx,
cfg: cfg,
client: client,
eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client, blobClient),
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client),
}
reg := prometheus.DefaultRegisterer
@@ -82,12 +58,12 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
Help: "Latest blockchain height the L1 message fetcher has synced with.",
})
return c, nil
return c
}
// Start starts the L1 message fetching process.
func (c *L1MessageFetcher) Start() {
messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
messageSyncedHeight, batchSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
if dbErr != nil {
log.Crit("L1MessageFetcher start failed", "err", dbErr)
}
@@ -96,11 +72,6 @@ func (c *L1MessageFetcher) Start() {
if batchSyncedHeight > l1SyncHeight {
l1SyncHeight = batchSyncedHeight
}
if bridgeBatchDepositSyncedHeight > l1SyncHeight {
l1SyncHeight = bridgeBatchDepositSyncedHeight
}
if c.cfg.StartHeight > l1SyncHeight {
l1SyncHeight = c.cfg.StartHeight - 1
}
@@ -120,13 +91,7 @@ func (c *L1MessageFetcher) Start() {
c.updateL1SyncHeight(l1SyncHeight, header.Hash())
log.Info("Start L1 message fetcher",
"message synced height", messageSyncedHeight,
"batch synced height", batchSyncedHeight,
"bridge batch deposit height", bridgeBatchDepositSyncedHeight,
"config start height", c.cfg.StartHeight,
"sync start height", c.l1SyncHeight+1,
)
log.Info("Start L1 message fetcher", "message synced height", messageSyncedHeight, "batch synced height", batchSyncedHeight, "config start height", c.cfg.StartHeight, "sync start height", c.l1SyncHeight+1)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
@@ -143,6 +108,7 @@ func (c *L1MessageFetcher) Start() {
}
func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
c.l1MessageFetcherRunningTotal.Inc()
startHeight := c.l1SyncHeight + 1
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
if rpcErr != nil {
@@ -168,7 +134,6 @@ func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
c.l1MessageFetcherReorgTotal.Inc()
log.Warn("L1 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
c.updateL1SyncHeight(resyncHeight, lastBlockHash)
c.l1MessageFetcherRunningTotal.Inc()
return
}
@@ -178,7 +143,6 @@ func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
}
c.updateL1SyncHeight(to, lastBlockHash)
c.l1MessageFetcherRunningTotal.Inc()
}
}

View File

@@ -20,7 +20,7 @@ import (
// L2MessageFetcher fetches cross message events from L2 and saves them to database.
type L2MessageFetcher struct {
ctx context.Context
cfg *config.FetcherConfig
cfg *config.LayerConfig
db *gorm.DB
client *ethclient.Client
l2SyncHeight uint64
@@ -35,7 +35,7 @@ type L2MessageFetcher struct {
}
// NewL2MessageFetcher creates a new L2MessageFetcher instance.
func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
func NewL2MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
c := &L2MessageFetcher{
ctx: ctx,
cfg: cfg,
@@ -64,17 +64,13 @@ func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
// Start starts the L2 message fetching process.
func (c *L2MessageFetcher) Start() {
l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
l2SentMessageSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
if dbErr != nil {
log.Crit("failed to get L2 cross message processed height", "err", dbErr)
return
}
l2SyncHeight := l2SentMessageSyncedHeight
if l2BridgeBatchDepositSyncedHeight > l2SyncHeight {
l2SyncHeight = l2BridgeBatchDepositSyncedHeight
}
// Sync from an older block to prevent reorg during restart.
if l2SyncHeight < logic.L2ReorgSafeDepth {
l2SyncHeight = 0
@@ -90,8 +86,7 @@ func (c *L2MessageFetcher) Start() {
c.updateL2SyncHeight(l2SyncHeight, header.Hash())
log.Info("Start L2 message fetcher", "l2 sent message synced height", l2SentMessageSyncedHeight,
"bridge batch deposit synced height", l2BridgeBatchDepositSyncedHeight, "sync start height", l2SyncHeight+1)
log.Info("Start L2 message fetcher", "message synced height", l2SentMessageSyncedHeight, "sync start height", l2SyncHeight+1)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
@@ -115,6 +110,7 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
return
}
log.Info("fetch and save missing L2 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
c.l2MessageFetcherRunningTotal.Inc()
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
to := from + c.cfg.FetchLimit - 1
@@ -132,7 +128,6 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
c.l2MessageFetcherReorgTotal.Inc()
log.Warn("L2 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
c.updateL2SyncHeight(resyncHeight, lastBlockHash)
c.l2MessageFetcherRunningTotal.Inc()
return
}
@@ -141,18 +136,12 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
return
}
if updateErr := c.eventUpdateLogic.UpdateL2WithdrawMessageProofs(c.ctx, c.l2SyncHeight); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}
if updateErr := c.eventUpdateLogic.UpdateL2BridgeBatchDepositEvent(c.ctx, l2FetcherResult.BridgeBatchDepositMessage); updateErr != nil {
if updateErr := c.eventUpdateLogic.UpdateL1BatchIndexAndStatus(c.ctx, c.l2SyncHeight); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}
c.updateL2SyncHeight(to, lastBlockHash)
c.l2MessageFetcherRunningTotal.Inc()
}
}
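Both fetchers share the same windowed scan shape; a minimal, self-contained sketch of that loop follows (the names below are illustrative, and the clamp of the final window is an assumption since it lies outside the excerpted hunks):

// scanWindows sketches the chunked range scan used by fetchAndSaveEvents on both
// layers: blocks are processed in [from, to] windows of at most fetchLimit blocks,
// where endHeight is the confirmation-adjusted chain head. fetchWindow stands in
// for the per-window log fetching, reorg detection, and persistence.
func scanWindows(startHeight, endHeight, fetchLimit uint64, fetchWindow func(from, to uint64) error) error {
	for from := startHeight; from <= endHeight; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > endHeight {
			to = endHeight // clamp the final window to the confirmed head (assumed behavior)
		}
		if err := fetchWindow(from, to); err != nil {
			return err // the caller retries from the last persisted height on the next tick
		}
	}
	return nil
}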

View File

@@ -2,7 +2,7 @@ package logic
import (
"context"
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -11,16 +11,14 @@ import (
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// EventUpdateLogic the logic of insert/update the database
type EventUpdateLogic struct {
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
bridgeBatchDepositEventOrm *orm.BridgeBatchDepositEvent
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight prometheus.Gauge
eventUpdateLogicL2MessageNonceUpdateHeight prometheus.Gauge
@@ -29,10 +27,9 @@ type EventUpdateLogic struct {
// NewEventUpdateLogic creates an EventUpdateLogic instance
func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
b := &EventUpdateLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
bridgeBatchDepositEventOrm: orm.NewBridgeBatchDepositEvent(db),
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
}
if !isL1 {
@@ -51,73 +48,64 @@ func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
}
// GetL1SyncHeight gets the l1 sync height from db
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, uint64, error) {
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL1SentMessage)
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, error) {
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL1SentMessage)
if err != nil {
log.Error("failed to get L1 cross message synced height", "error", err)
return 0, 0, 0, err
return 0, 0, err
}
batchSyncedHeight, err := b.batchEventOrm.GetBatchEventSyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get L1 batch event synced height", "error", err)
return 0, 0, 0, err
return 0, 0, err
}
bridgeBatchDepositSyncedHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL1SyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get l1 bridge batch deposit synced height", "error", err)
return 0, 0, 0, err
}
return messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, nil
return messageSyncedHeight, batchSyncedHeight, nil
}
// GetL2MessageSyncedHeightInDB gets L2 messages synced height
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, uint64, error) {
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL2SentMessage)
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, error) {
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL2SentMessage)
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, 0, err
return 0, err
}
l2BridgeBatchDepositSyncHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL2SyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get bridge batch deposit processed height", "err", err)
return 0, 0, err
}
return l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncHeight, nil
return l2SentMessageSyncedHeight, nil
}
// L1InsertOrUpdate inserts or updates l1 messages
func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult *L1FilterResult) error {
if err := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages); err != nil {
log.Error("failed to insert L1 deposit messages", "err", err)
return err
}
err := b.db.Transaction(func(tx *gorm.DB) error {
if txErr := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages, tx); txErr != nil {
log.Error("failed to insert L1 deposit messages", "err", txErr)
return txErr
}
if err := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages); err != nil {
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", err)
return err
}
if txErr := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages, tx); txErr != nil {
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", txErr)
return txErr
}
if err := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents); err != nil {
log.Error("failed to insert or update batch events", "err", err)
return err
}
if txErr := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents, tx); txErr != nil {
log.Error("failed to insert or update batch events", "err", txErr)
return txErr
}
if err := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents); err != nil {
log.Error("failed to insert L1 message queue events", "err", err)
return err
}
if txErr := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents, tx); txErr != nil {
log.Error("failed to insert L1 message queue events", "err", txErr)
return txErr
}
if err := b.crossMessageOrm.InsertFailedL1GatewayTxs(ctx, l1FetcherResult.RevertedTxs); err != nil {
log.Error("failed to insert failed L1 gateway transactions", "err", err)
return err
}
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l1FetcherResult.RevertedTxs, tx); txErr != nil {
log.Error("failed to insert L1 failed gateway router transactions", "err", txErr)
return txErr
}
return nil
})
if err := b.bridgeBatchDepositEventOrm.InsertOrUpdateL1BridgeBatchDepositEvent(ctx, l1FetcherResult.BridgeBatchDepositEvents); err != nil {
log.Error("failed to insert L1 bridge batch deposit transactions", "err", err)
if err != nil {
log.Error("failed to update db of L1 events", "err", err)
return err
}
@@ -125,11 +113,6 @@ func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult
}
func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, batchIndex, startBlock, endBlock uint64) error {
if startBlock > endBlock {
log.Warn("start block is greater than end block", "start", startBlock, "end", endBlock)
return nil
}
l2WithdrawMessages, err := b.crossMessageOrm.GetL2WithdrawalsByBlockRange(ctx, startBlock, endBlock)
if err != nil {
log.Error("failed to get L2 withdrawals by batch index", "batch index", batchIndex, "err", err)
@@ -152,8 +135,8 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
}
if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce {
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actual next message nonce", l2WithdrawMessages[0].MessageNonce)
return errors.New("nonce mismatch")
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actuall next message nonce", l2WithdrawMessages[0].MessageNonce)
return fmt.Errorf("nonce mismatch")
}
messageHashes := make([]common.Hash, len(l2WithdrawMessages))
@@ -165,7 +148,7 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
for i, message := range l2WithdrawMessages {
message.MerkleProof = proofs[i]
message.RollupStatus = int(btypes.RollupStatusTypeFinalized)
message.RollupStatus = int(orm.RollupStatusTypeFinalized)
message.BatchIndex = batchIndex
}
@@ -178,85 +161,49 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
return nil
}
// UpdateL2WithdrawMessageProofs updates L2 withdrawal message proofs.
func (b *EventUpdateLogic) UpdateL2WithdrawMessageProofs(ctx context.Context, height uint64) error {
lastUpdatedFinalizedBlockHeight, err := b.batchEventOrm.GetLastUpdatedFinalizedBlockHeight(ctx)
// UpdateL1BatchIndexAndStatus updates L1 finalized batch index and status
func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, height uint64) error {
finalizedBatches, err := b.batchEventOrm.GetFinalizedBatchesLEBlockHeight(ctx, height)
if err != nil {
log.Error("failed to get last updated finalized block height", "error", err)
return err
}
finalizedBatches, err := b.batchEventOrm.GetUnupdatedFinalizedBatchesLEBlockHeight(ctx, height)
if err != nil {
log.Error("failed to get unupdated finalized batches >= block height", "error", err)
log.Error("failed to get batches >= block height", "error", err)
return err
}
for _, finalizedBatch := range finalizedBatches {
log.Info("update finalized batch or bundle info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
// This method is compatible with both "finalize by batch" and "finalize by bundle" modes:
// - In "finalize by batch" mode, each batch emits a FinalizedBatch event.
// - In "finalize by bundle" mode, all batches in the bundle emit only one FinalizedBatch event, using the last batch's index and hash.
//
// The method updates two types of information in L2 withdrawal messages:
// 1. Withdraw proof generation:
// - finalize by batch: Generates proofs for each batch.
// - finalize by bundle: Generates proofs for the entire bundle at once.
// 2. Batch index updating:
// - finalize by batch: Updates the batch index for withdrawal messages in each processed batch.
// - finalize by bundle: Updates the batch index for all withdrawal messages in the bundle, using the index of the last batch in the bundle.
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, lastUpdatedFinalizedBlockHeight+1, finalizedBatch.EndBlockNumber); updateErr != nil {
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
log.Info("update finalized batch info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, finalizedBatch.StartBlockNumber, finalizedBatch.EndBlockNumber); updateErr != nil {
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
return updateErr
}
if dbErr := b.batchEventOrm.UpdateBatchEventStatus(ctx, finalizedBatch.BatchIndex); dbErr != nil {
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
return dbErr
}
lastUpdatedFinalizedBlockHeight = finalizedBatch.EndBlockNumber
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight.Set(float64(finalizedBatch.EndBlockNumber))
}
return nil
}
// UpdateL2BridgeBatchDepositEvent update l2 bridge batch deposit status
func (b *EventUpdateLogic) UpdateL2BridgeBatchDepositEvent(ctx context.Context, l2BatchDistributes []*orm.BridgeBatchDepositEvent) error {
distributeFailedMap := make(map[uint64][]string)
for _, l2BatchDistribute := range l2BatchDistributes {
if btypes.TxStatusType(l2BatchDistribute.TxStatus) == btypes.TxStatusBridgeBatchDistributeFailed {
distributeFailedMap[l2BatchDistribute.BatchIndex] = append(distributeFailedMap[l2BatchDistribute.BatchIndex], l2BatchDistribute.Sender)
}
if err := b.bridgeBatchDepositEventOrm.UpdateBatchEventStatus(ctx, l2BatchDistribute); err != nil {
log.Error("failed to update L1 bridge batch distribute event", "batchIndex", l2BatchDistribute.BatchIndex, "err", err)
return err
}
}
for batchIndex, distributeFailedSenders := range distributeFailedMap {
if err := b.bridgeBatchDepositEventOrm.UpdateDistributeFailedStatus(ctx, batchIndex, distributeFailedSenders); err != nil {
log.Error("failed to update L1 bridge batch distribute failed event", "batchIndex", batchIndex, "failed senders", distributeFailedSenders, "err", err)
return err
}
}
return nil
}
// L2InsertOrUpdate inserts or updates L2 messages
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
if err := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages); err != nil {
log.Error("failed to insert L2 withdrawal messages", "err", err)
return err
}
err := b.db.Transaction(func(tx *gorm.DB) error {
if txErr := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages, tx); txErr != nil {
log.Error("failed to insert L2 withdrawal messages", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages, tx); txErr != nil {
log.Error("failed to update L2 relayed messages of L1 deposits", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l2FetcherResult.OtherRevertedTxs, tx); txErr != nil {
log.Error("failed to insert L2 failed gateway router transactions", "err", txErr)
return txErr
}
return nil
})
if err := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages); err != nil {
log.Error("failed to update L2 relayed messages of L1 deposits", "err", err)
return err
}
if err := b.crossMessageOrm.InsertFailedL2GatewayTxs(ctx, l2FetcherResult.OtherRevertedTxs); err != nil {
log.Error("failed to insert failed L2 gateway transactions", "err", err)
if err != nil {
log.Error("failed to update db of L2 events", "err", err)
return err
}
return nil
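On the transactional side of this hunk, every ORM write for a fetch window takes the same tx handle; a minimal sketch of that pattern follows (requires gorm.io/gorm; the helper name and signature are assumptions, not code from this repository):

// insertWindow groups a window's writes into one gorm transaction: returning a
// non-nil error from the closure rolls everything back, returning nil commits.
func insertWindow(db *gorm.DB, writes []func(tx *gorm.DB) error) error {
	return db.Transaction(func(tx *gorm.DB) error {
		for _, write := range writes {
			if err := write(tx); err != nil {
				return err // roll back the whole window on the first failure
			}
		}
		return nil
	})
}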

View File

@@ -16,7 +16,6 @@ import (
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/types"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -36,23 +35,20 @@ const (
// HistoryLogic services.
type HistoryLogic struct {
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
bridgeBatchDepositOrm *orm.BridgeBatchDepositEvent
redis *redis.Client
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
redis *redis.Client
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
}
// NewHistoryLogic returns bridge history services.
func NewHistoryLogic(db *gorm.DB, redis *redis.Client) *HistoryLogic {
logic := &HistoryLogic{
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
bridgeBatchDepositOrm: orm.NewBridgeBatchDepositEvent(db),
redis: redis,
cacheMetrics: initCacheMetrics(),
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
redis: redis,
cacheMetrics: initCacheMetrics(),
}
return logic
}
@@ -76,28 +72,25 @@ func (h *HistoryLogic) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, a
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
if err != nil {
return nil, err
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
return txHistoryInfos, nil
return messages, nil
})
if err != nil {
log.Error("failed to get L2 claimable withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetL2WithdrawalsByAddress gets all withdrawal txs under given address.
@@ -119,28 +112,25 @@ func (h *HistoryLogic) GetL2WithdrawalsByAddress(ctx context.Context, address st
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
if err != nil {
return nil, err
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
return txHistoryInfos, nil
return messages, nil
})
if err != nil {
log.Error("failed to get L2 withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetTxsByAddress gets tx infos under given address.
@@ -162,36 +152,25 @@ func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address string, page
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetTxsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetTxsByAddress(ctx, address)
if err != nil {
return nil, err
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
batchDepositMessages, getErr := h.bridgeBatchDepositOrm.GetTxsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
for _, message := range batchDepositMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
}
return txHistoryInfos, nil
return messages, nil
})
if err != nil {
log.Error("failed to get txs by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetTxsByHashes gets tx infos under given tx hashes.
@@ -239,24 +218,15 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
}
if len(uncachedHashes) > 0 {
messages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
var txHistories []*types.TxHistoryInfo
crossMessages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get cross messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
for _, message := range crossMessages {
txHistories = append(txHistories, getTxHistoryInfoFromCrossMessage(message))
}
batchDepositMessages, err := h.bridgeBatchDepositOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get batch deposit messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
for _, message := range batchDepositMessages {
txHistories = append(txHistories, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
for _, message := range messages {
txHistories = append(txHistories, getTxHistoryInfo(message))
}
resultMap := make(map[string]*types.TxHistoryInfo)
@@ -290,19 +260,19 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
return results, nil
}
func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistoryInfo {
func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
MessageHash: message.MessageHash,
TokenType: btypes.TokenType(message.TokenType),
TokenType: orm.TokenType(message.TokenType),
TokenIDs: utils.ConvertStringToStringArray(message.TokenIDs),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmounts),
L1TokenAddress: message.L1TokenAddress,
L2TokenAddress: message.L2TokenAddress,
MessageType: btypes.MessageType(message.MessageType),
TxStatus: btypes.TxStatusType(message.TxStatus),
MessageType: orm.MessageType(message.MessageType),
TxStatus: orm.TxStatusType(message.TxStatus),
BlockTimestamp: message.BlockTimestamp,
}
if txHistory.MessageType == btypes.MessageTypeL1SentMessage {
if txHistory.MessageType == orm.MessageTypeL1SentMessage {
txHistory.Hash = message.L1TxHash
txHistory.ReplayTxHash = message.L1ReplayTxHash
txHistory.RefundTxHash = message.L1RefundTxHash
@@ -318,7 +288,7 @@ func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistor
Hash: message.L1TxHash,
BlockNumber: message.L1BlockNumber,
}
if btypes.RollupStatusType(message.RollupStatus) == btypes.RollupStatusTypeFinalized {
if orm.RollupStatusType(message.RollupStatus) == orm.RollupStatusTypeFinalized {
txHistory.ClaimInfo = &types.ClaimInfo{
From: message.MessageFrom,
To: message.MessageTo,
@@ -336,31 +306,10 @@ func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistor
return txHistory
}
func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepositEvent) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
Hash: message.L1TxHash,
TokenType: btypes.TokenType(message.TokenType),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmount),
BlockNumber: message.L1BlockNumber,
MessageType: btypes.MessageTypeL1BatchDeposit,
TxStatus: btypes.TxStatusType(message.TxStatus),
CounterpartChainTx: &types.CounterpartChainTx{
Hash: message.L2TxHash,
BlockNumber: message.L2BlockNumber,
},
BlockTimestamp: message.BlockTimestamp,
BatchDepositFee: message.Fee,
}
if txHistory.TokenType != btypes.TokenTypeETH {
txHistory.L1TokenAddress = message.L1TokenAddress
txHistory.L2TokenAddress = message.L2TokenAddress
}
return txHistory
}
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
start := int64((pageNum - 1) * pageSize)
end := start + int64(pageSize) - 1
total, err := h.redis.ZCard(ctx, cacheKey).Result()
if err != nil {
log.Error("failed to get zcard result", "error", err)
@@ -371,11 +320,7 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
return nil, 0, false, nil
}
if start >= total {
return nil, 0, false, nil
}
values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
values, err := h.redis.ZRange(ctx, cacheKey, start, end).Result()
if err != nil {
log.Error("failed to get zrange result", "error", err)
return nil, 0, false, err
@@ -410,14 +355,14 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
return err
}
} else {
// The transactions are sorted by block timestamp, so we use the block timestamp as the score.
for _, tx := range txs {
// The transactions are sorted, so we use their index as the score.
for i, tx := range txs {
txBytes, err := json.Marshal(tx)
if err != nil {
log.Error("failed to marshal transaction to json", "error", err)
return err
}
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(tx.BlockTimestamp), Member: txBytes}).Err(); err != nil {
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(i), Member: txBytes}).Err(); err != nil {
log.Error("failed to add transaction to sorted set", "error", err)
return err
}
@@ -436,7 +381,12 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
return nil
}
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, txHistories []*types.TxHistoryInfo, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, messages []*orm.CrossMessage, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
for _, message := range messages {
txHistories = append(txHistories, getTxHistoryInfo(message))
}
err := h.cacheTxsInfo(ctx, cacheKey, txHistories)
if err != nil {
log.Error("failed to cache txs info", "key", cacheKey, "err", err)
@@ -453,6 +403,5 @@ func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKe
log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
return pagedTxs, total, nil
}
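The per-address history cache above is a Redis sorted set; a rough sketch of the write-then-page pattern, using the index-as-score variant on the "+" side of the hunks (requires context and github.com/go-redis/redis/v8; rdb, cacheKey, and the pre-serialized txs are assumptions):

// cachePage writes the already-sorted, JSON-encoded txs into a sorted set with
// their index as the score, then reads one page back with ZRange. Page n
// (1-based) of size s maps to member indices [(n-1)*s, (n-1)*s + s - 1].
func cachePage(ctx context.Context, rdb *redis.Client, cacheKey string, txs [][]byte, page, pageSize uint64) ([]string, error) {
	pipe := rdb.TxPipeline()
	for i, tx := range txs {
		pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(i), Member: tx})
	}
	if _, err := pipe.Exec(ctx); err != nil {
		return nil, err
	}
	start := int64((page - 1) * pageSize)
	return rdb.ZRange(ctx, cacheKey, start, start+int64(pageSize)-1).Result()
}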

View File

@@ -2,95 +2,29 @@ package logic
import (
"context"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// L1EventParser the l1 event parser
type L1EventParser struct {
cfg *config.FetcherConfig
client *ethclient.Client
blobClient blob_client.BlobClient
}
// NewL1EventParser creates l1 event parser
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client, blobClient blob_client.BlobClient) *L1EventParser {
return &L1EventParser{
cfg: cfg,
client: client,
blobClient: blobClient,
}
func NewL1EventParser() *L1EventParser {
return &L1EventParser{}
}
// ParseL1CrossChainEventLogs parse l1 cross chain event logs
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
l1CrossChainDepositMessages, l1CrossChainRelayedMessages, err := e.ParseL1SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
l1BridgeBatchDepositMessages, err := e.ParseL1BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
return l1CrossChainDepositMessages, l1CrossChainRelayedMessages, l1BridgeBatchDepositMessages, nil
}
// ParseL1BridgeBatchDepositCrossChainEventLogs parse L1 watched batch bridge cross chain events.
func (e *L1EventParser) ParseL1BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
var l1BridgeBatchDepositMessages []*orm.BridgeBatchDepositEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1BridgeBatchDepositSig:
event := backendabi.L1BatchBridgeGatewayDeposit{}
if err := utils.UnpackLog(backendabi.L1BatchBridgeGatewayABI, &event, "Deposit", vlog); err != nil {
log.Error("Failed to unpack batch bridge gateway deposit event", "err", err)
return nil, err
}
var tokenType btypes.TokenType
if event.Token == common.HexToAddress("0") {
tokenType = btypes.TokenTypeETH
} else {
tokenType = btypes.TokenTypeERC20
}
l1BridgeBatchDepositMessages = append(l1BridgeBatchDepositMessages, &orm.BridgeBatchDepositEvent{
TokenType: int(tokenType),
Sender: event.Sender.String(),
BatchIndex: event.BatchIndex.Uint64(),
TokenAmount: event.Amount.String(),
Fee: event.Fee.String(),
L1TokenAddress: event.Token.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDeposit),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L1LogIndex: vlog.Index,
})
}
}
return l1BridgeBatchDepositMessages, nil
}
// ParseL1SingleCrossChainEventLogs parses L1 watched single cross chain events.
func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l1DepositMessages []*orm.CrossMessage
var l1RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
@@ -98,64 +32,64 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
case backendabi.L1DepositETHSig:
event := backendabi.ETHMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ETHGatewayABI, &event, "DepositETH", vlog); err != nil {
log.Error("Failed to unpack DepositETH event", "err", err)
log.Warn("Failed to unpack DepositETH event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeETH)
lastMessage.TokenType = int(orm.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.IL1ERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Error("Failed to unpack DepositERC20 event", "err", err)
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC20)
lastMessage.TokenType = int(orm.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "DepositERC721", vlog); err != nil {
log.Error("Failed to unpack DepositERC721 event", "err", err)
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
case backendabi.L1BatchDepositERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "BatchDepositERC721", vlog); err != nil {
log.Error("Failed to unpack BatchDepositERC721 event", "err", err)
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "DepositERC1155", vlog); err != nil {
log.Error("Failed to unpack DepositERC1155 event", "err", err)
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -163,74 +97,61 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog); err != nil {
log.Error("Failed to unpack BatchDepositERC1155 event", "err", err)
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
case backendabi.L1DepositWrappedTokenSig:
event := backendabi.WrappedTokenMessageEvent{}
if err := utils.UnpackLog(backendabi.L1WrappedTokenGatewayABI, &event, "DepositWrappedToken", vlog); err != nil {
log.Error("Failed to unpack DepositWrappedToken event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
case backendabi.L1SentMessageEventSig:
event := backendabi.L1SentMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
log.Error("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
from, err := getRealFromAddress(ctx, event.Sender, event.Message, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
if err != nil {
log.Error("Failed to get real 'from' address", "err", err)
log.Warn("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
l1DepositMessages = append(l1DepositMessages, &orm.CrossMessage{
L1BlockNumber: vlog.BlockNumber,
Sender: from,
Sender: event.Sender.String(),
Receiver: event.Target.String(),
TokenType: int(btypes.TokenTypeETH),
TokenType: int(orm.TokenTypeETH),
L1TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageType: int(btypes.MessageTypeL1SentMessage),
TxStatus: int(btypes.TxStatusTypeSent),
MessageType: int(orm.MessageTypeL1SentMessage),
TxStatus: int(orm.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
})
case backendabi.L1RelayedMessageEventSig:
event := backendabi.L1RelayedMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "RelayedMessage", vlog); err != nil {
log.Error("Failed to unpack RelayedMessage event", "err", err)
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return nil, nil, err
}
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeRelayed),
MessageType: int(btypes.MessageTypeL2SentMessage),
TxStatus: int(orm.TxStatusTypeRelayed),
MessageType: int(orm.MessageTypeL2SentMessage),
})
case backendabi.L1FailedRelayedMessageEventSig:
event := backendabi.L1FailedRelayedMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "FailedRelayedMessage", vlog); err != nil {
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
return nil, nil, err
}
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
MessageType: int(btypes.MessageTypeL2SentMessage),
TxStatus: int(orm.TxStatusTypeFailedRelayed),
MessageType: int(orm.MessageTypeL2SentMessage),
})
}
}
@@ -238,157 +159,54 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
}
// ParseL1BatchEventLogs parses L1 watched batch events.
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client, blockTimestampsMap map[uint64]uint64) ([]*orm.BatchEvent, error) {
// Since multiple CommitBatch events per transaction are introduced from CodecV7 onwards,
// with one transaction carrying multiple blobs,
// each CommitBatch event corresponds to a blob containing block range data.
// To correctly process these events, we need to:
// 1. Parse the associated blob data to extract the block range for each event
// 2. Track the parent batch hash for each processed CommitBatch event, in order to:
// - Validate the batch hash, since the parent batch hash is needed to calculate the batch hash
// - Derive the index of the current batch from the number of parent batch hashes tracked
// In commitBatches and commitAndFinalizeBatch, the parent batch hash is passed in calldata,
// so that we can use it to get the first batch's parent batch hash, and derive the rest.
// The index map serves this purpose with:
// Key: commit transaction hash
// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
txBlobIndexMap := make(map[common.Hash][]common.Hash)
// Cache for the previous transaction to avoid duplicate fetches
var lastTxHash common.Hash
var lastTx *types.Transaction
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1CommitBatchEventSig:
event := backendabi.L1CommitBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "CommitBatch", vlog); err != nil {
log.Error("Failed to unpack CommitBatch event", "err", err)
log.Warn("Failed to unpack CommitBatch event", "err", err)
return nil, err
}
// Get transaction, reuse if it's the same as previous
var commitTx *types.Transaction
if lastTxHash == vlog.TxHash && lastTx != nil {
commitTx = lastTx
} else {
log.Debug("Fetching commit batch transaction", "txHash", vlog.TxHash.String())
// Create 10-second timeout context for transaction fetch
txCtx, txCancel := context.WithTimeout(ctx, 10*time.Second)
fetchedTx, isPending, err := client.TransactionByHash(txCtx, vlog.TxHash)
txCancel()
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
}
commitTx = fetchedTx
lastTxHash = vlog.TxHash
lastTx = commitTx
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
return nil, err
}
version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
return nil, err
}
if version >= 7 { // It's a batch with version >= 7.
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
if err != nil {
return nil, fmt.Errorf("unsupported codec version: %v, err: %w", version, err)
}
// we append the batch hash to the slice for the current commit transaction after processing the batch.
// that means the current index of the batch within the transaction is len(txBlobIndexMap[vlog.TxHash]).
currentIndex := len(txBlobIndexMap[vlog.TxHash])
if currentIndex >= len(commitTx.BlobHashes()) {
return nil, fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
vlog.TxHash.String(), len(commitTx.BlobHashes()), currentIndex, event.BatchIndex.Uint64())
}
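// Note: the code assumes the commit transaction's blob versioned hashes appear in the same order as its CommitBatch events, so the i-th event maps to the i-th blob.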
blobVersionedHash := commitTx.BlobHashes()[currentIndex]
// validate the batch hash
var parentBatchHash common.Hash
if currentIndex == 0 {
parentBatchHash, err = utils.GetParentBatchHashFromCalldata(commitTx.Data())
if err != nil {
return nil, fmt.Errorf("failed to get parent batch header from calldata, tx hash: %s, err: %w", vlog.TxHash.String(), err)
}
} else {
// here we need to subtract 1 from the current index to get the parent batch hash.
parentBatchHash = txBlobIndexMap[vlog.TxHash][currentIndex-1]
}
calculatedBatch, err := codec.NewDABatchFromParams(event.BatchIndex.Uint64(), blobVersionedHash, parentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex.Uint64(), err)
}
if calculatedBatch.Hash() != event.BatchHash {
return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
}
log.Debug("Processing blob data", "blobVersionedHash", blobVersionedHash.String(), "batchIndex", event.BatchIndex.Uint64(), "currentIndex", currentIndex)
// Create 20-second timeout context for blob processing
blobCtx, blobCancel := context.WithTimeout(ctx, 20*time.Second)
blocks, err := e.getBatchBlockRangeFromBlob(blobCtx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
blobCancel()
if err != nil {
return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
}
if len(blocks) == 0 {
return nil, fmt.Errorf("no blocks found in the blob, blobVersionedHash: %s, block number: %d, blob index: %d",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex)
}
startBlock = blocks[0].Number()
endBlock = blocks[len(blocks)-1].Number()
txBlobIndexMap[vlog.TxHash] = append(txBlobIndexMap[vlog.TxHash], event.BatchHash)
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeCommitted),
BatchStatus: int(orm.BatchStatusTypeCommitted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
StartBlockNumber: startBlock,
EndBlockNumber: endBlock,
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1RevertBatchV0EventSig:
event := backendabi.L1RevertBatchV0Event{}
case backendabi.L1RevertBatchEventSig:
event := backendabi.L1RevertBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
log.Error("Failed to unpack RevertBatch event", "err", err)
log.Warn("Failed to unpack RevertBatch event", "err", err)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeReverted),
BatchStatus: int(orm.BatchStatusTypeReverted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1RevertBatchV7EventSig:
event := backendabi.L1RevertBatchV7Event{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch0", vlog); err != nil {
log.Error("Failed to unpack RevertBatch event", "err", err)
return nil, err
}
for i := event.StartBatchIndex.Uint64(); i <= event.FinishBatchIndex.Uint64(); i++ {
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeReverted),
BatchIndex: i,
L1BlockNumber: vlog.BlockNumber,
})
}
case backendabi.L1FinalizeBatchEventSig:
event := backendabi.L1FinalizeBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
log.Error("Failed to unpack FinalizeBatch event", "err", err)
log.Warn("Failed to unpack FinalizeBatch event", "err", err)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeFinalized),
BatchStatus: int(orm.BatchStatusTypeFinalized),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
@@ -411,14 +229,14 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
case backendabi.L1QueueTransactionEventSig:
event := backendabi.L1QueueTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "QueueTransaction", vlog); err != nil {
log.Error("Failed to unpack QueueTransaction event", "err", err)
log.Warn("Failed to unpack QueueTransaction event", "err", err)
return nil, err
}
messageHash := common.BytesToHash(crypto.Keccak256(event.Data))
// If the message hash is not found in the map, the event does not come from a regular sendMessage deposit; it is a replayMessage or an enforced tx, so add it to the events.
if _, exists := messageHashes[messageHash]; !exists {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeQueueTransaction,
EventType: orm.MessageQueueEventTypeQueueTransaction,
QueueIndex: event.QueueIndex,
MessageHash: messageHash,
TxHash: vlog.TxHash,
@@ -427,34 +245,24 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
case backendabi.L1DequeueTransactionEventSig:
event := backendabi.L1DequeueTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DequeueTransaction", vlog); err != nil {
log.Error("Failed to unpack DequeueTransaction event", "err", err)
log.Warn("Failed to unpack DequeueTransaction event", "err", err)
return nil, err
}
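// GetSkippedQueueIndices presumably expands the skipped bitmap into the individual skipped queue indices, each offset from StartIndex.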
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
for _, index := range skippedIndices {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeDequeueTransaction,
EventType: orm.MessageQueueEventTypeDequeueTransaction,
QueueIndex: index,
})
}
case backendabi.L1ResetDequeuedTransactionEventSig:
event := backendabi.L1ResetDequeuedTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "ResetDequeuedTransaction", vlog); err != nil {
log.Error("Failed to unpack ResetDequeuedTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeResetDequeuedTransaction,
QueueIndex: event.StartIndex.Uint64(),
})
case backendabi.L1DropTransactionEventSig:
event := backendabi.L1DropTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
log.Error("Failed to unpack DropTransaction event", "err", err)
log.Warn("Failed to unpack DropTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeDropTransaction,
EventType: orm.MessageQueueEventTypeDropTransaction,
QueueIndex: event.Index.Uint64(),
TxHash: vlog.TxHash,
})
@@ -462,63 +270,3 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
}
return l1MessageQueueEvents, nil
}
func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMessage []byte, client *ethclient.Client, txHash common.Hash, gatewayRouterAddr string) (string, error) {
if eventSender != common.HexToAddress(gatewayRouterAddr) {
return eventSender.String(), nil
}
// deposit/withdraw ETH: EOA -> contract 1 -> ... -> contract n -> gateway router -> messenger.
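// Assuming the gateway router ABI-encodes the original sender as the first 32-byte word of the message, the address occupies the last 20 bytes of that word (left-padded with zeros).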
if len(eventMessage) >= 32 {
addressBytes := eventMessage[32-common.AddressLength : 32]
var address common.Address
address.SetBytes(addressBytes)
return address.Hex(), nil
}
log.Warn("event message data too short to contain an address", "length", len(eventMessage))
// Legacy handling logic if length of message < 32, for backward compatibility before the next contract upgrade.
tx, isPending, rpcErr := client.TransactionByHash(ctx, txHash)
if rpcErr != nil || isPending {
log.Error("Failed to get transaction or the transaction is still pending", "rpcErr", rpcErr, "isPending", isPending)
return "", rpcErr
}
// Case 1: deposit/withdraw ETH: EOA -> multisig -> gateway router -> messenger.
if tx.To() != nil && (*tx.To()).String() != gatewayRouterAddr {
return (*tx.To()).String(), nil
}
// Case 2: deposit/withdraw ETH: EOA -> gateway router -> messenger.
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, err := signer.Sender(tx)
if err != nil {
log.Error("Get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", err)
return "", err
}
return sender.String(), nil
}
func (e *L1EventParser) getBatchBlockRangeFromBlob(ctx context.Context, codec encoding.Codec, blobVersionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
blob, err := e.blobClient.GetBlobByVersionedHashAndBlockTime(ctx, blobVersionedHash, l1BlockTime)
if err != nil {
return nil, fmt.Errorf("failed to get blob %s: %w", blobVersionedHash.Hex(), err)
}
if blob == nil {
return nil, fmt.Errorf("blob %s not found", blobVersionedHash.Hex())
}
blobPayload, err := codec.DecodeBlob(blob)
if err != nil {
return nil, fmt.Errorf("blob %s decode error: %w", blobVersionedHash.Hex(), err)
}
blocks := blobPayload.Blocks()
if len(blocks) == 0 {
return nil, fmt.Errorf("empty blocks in blob %s", blobVersionedHash.Hex())
}
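// The caller relies on blocks being ordered by block number: the first and last entries define the batch's block range.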
log.Debug("Successfully processed blob", "blobVersionedHash", blobVersionedHash.Hex(), "blocksCount", len(blocks))
return blocks, nil
}
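For orientation, a minimal sketch (not part of the diff) of how the new parser signatures above compose. It assumes a helper method on the fetcher that returns only an error, with ctx, cfg, client, blobClient, eventLogs and blockTimestampsMap already in scope; the real fetcher stores these results in L1FilterResult rather than logging them:
parser := NewL1EventParser(cfg, client, blobClient)
// Cross-chain messages: deposits, (failed-)relayed messages, and bridge batch deposits.
deposits, relayed, batchDeposits, err := parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
if err != nil {
return err
}
// Batch events: commit/revert/finalize, with block ranges resolved from calldata or blob data.
batchEvents, err := parser.ParseL1BatchEventLogs(ctx, eventLogs, client, blockTimestampsMap)
if err != nil {
return err
}
log.Info("parsed L1 events", "deposits", len(deposits), "relayed", len(relayed), "batchDeposits", len(batchDeposits), "batchEvents", len(batchEvents))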

View File

@@ -11,13 +11,11 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"gorm.io/gorm"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -27,20 +25,18 @@ const L1ReorgSafeDepth = 64
// L1FilterResult L1 fetcher result
type L1FilterResult struct {
DepositMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage
BatchEvents []*orm.BatchEvent
MessageQueueEvents []*orm.MessageQueueEvent
RevertedTxs []*orm.CrossMessage
BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
DepositMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage
BatchEvents []*orm.BatchEvent
MessageQueueEvents []*orm.MessageQueueEvent
RevertedTxs []*orm.CrossMessage
}
// L1FetcherLogic the L1 fetcher logic
type L1FetcherLogic struct {
cfg *config.FetcherConfig
cfg *config.LayerConfig
client *ethclient.Client
addressList []common.Address
gatewayList []common.Address
parser *L1EventParser
db *gorm.DB
crossMessageOrm *orm.CrossMessage
@@ -50,10 +46,13 @@ type L1FetcherLogic struct {
}
// NewL1FetcherLogic creates L1 fetcher logic
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client, blobClient blob_client.BlobClient) *L1FetcherLogic {
func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -66,65 +65,16 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.MessageQueueAddr),
}
gatewayList := []common.Address{
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional gateways.
// Optional erc20 gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
}
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
if common.HexToAddress(cfg.ETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.ETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.ETHGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
if common.HexToAddress(cfg.GasTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.GasTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.GasTokenGatewayAddr))
}
if common.HexToAddress(cfg.WrappedTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
}
if common.HexToAddress(cfg.MessageQueueV2Addr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.MessageQueueV2Addr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList)
f := &L1FetcherLogic{
db: db,
@@ -133,8 +83,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
cfg: cfg,
client: client,
addressList: addressList,
gatewayList: gatewayList,
parser: NewL1EventParser(cfg, client, blobClient),
parser: NewL1EventParser(),
}
reg := prometheus.DefaultRegisterer
@@ -173,14 +122,24 @@ func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to u
return false, 0, lastBlockHash, blocks, nil
}
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) ([]*orm.CrossMessage, error) {
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
var l1RevertedTxs []*orm.CrossMessage
blockTimestampsMap := make(map[uint64]uint64)
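// Collect block timestamps while iterating so later parsing steps reuse them instead of refetching block headers.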
for i := from; i <= to; i++ {
block := blocks[i-from]
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
// Gateways: L1 deposit.
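// Skip contract creations: they have no To address and cannot target a gateway or the messenger.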
txTo := tx.To()
if txTo == nil {
continue
}
toAddress := txTo.String()
// GatewayRouter: L1 deposit.
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
if !isTransactionToGateway(tx, f.gatewayList) {
if toAddress != f.cfg.GatewayRouterAddr && toAddress != f.cfg.MessengerAddr {
continue
}
@@ -188,7 +147,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, receiptErr
return nil, nil, receiptErr
}
// Check if the transaction failed
@@ -200,21 +159,21 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
sender, senderErr := signer.Sender(tx)
if senderErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
return nil, senderErr
return nil, nil, senderErr
}
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
L1TxHash: tx.Hash().String(),
MessageType: int(btypes.MessageTypeL1SentMessage),
MessageType: int(orm.MessageTypeL1SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L1BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
})
}
}
return l1RevertedTxs, nil
return blockTimestampsMap, l1RevertedTxs, nil
}
func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
@@ -225,7 +184,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 17)
query.Topics[0] = make([]common.Hash, 13)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1DepositERC721Sig
@@ -234,15 +193,11 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L1CommitBatchEventSig
query.Topics[0][8] = backendabi.L1RevertBatchV0EventSig
query.Topics[0][9] = backendabi.L1RevertBatchV7EventSig
query.Topics[0][10] = backendabi.L1FinalizeBatchEventSig
query.Topics[0][11] = backendabi.L1QueueTransactionEventSig
query.Topics[0][12] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][13] = backendabi.L1DropTransactionEventSig
query.Topics[0][14] = backendabi.L1ResetDequeuedTransactionEventSig
query.Topics[0][15] = backendabi.L1BridgeBatchDepositSig
query.Topics[0][16] = backendabi.L1DepositWrappedTokenSig
query.Topics[0][8] = backendabi.L1RevertBatchEventSig
query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
@@ -266,31 +221,25 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
return isReorg, reorgHeight, blockHash, nil, nil
}
l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
if err != nil {
log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
// Map block number to block timestamp to avoid fetching block header multiple times to get block timestamp.
blockTimestampsMap := make(map[uint64]uint64)
for _, block := range blocks {
blockTimestampsMap[block.NumberU64()] = block.Time()
}
eventLogs, err := f.l1FetcherLogs(ctx, from, to)
if err != nil {
log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
l1DepositMessages, l1RelayedMessages, l1BridgeBatchDepositMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client, blockTimestampsMap)
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
if err != nil {
log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
@@ -303,12 +252,11 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
}
res := L1FilterResult{
DepositMessages: l1DepositMessages,
RelayedMessages: l1RelayedMessages,
BatchEvents: l1BatchEvents,
MessageQueueEvents: l1MessageQueueEvents,
RevertedTxs: l1RevertedTxs,
BridgeBatchDepositEvents: l1BridgeBatchDepositMessages,
DepositMessages: l1DepositMessages,
RelayedMessages: l1RelayedMessages,
BatchEvents: l1BatchEvents,
MessageQueueEvents: l1MessageQueueEvents,
RevertedTxs: l1RevertedTxs,
}
f.updateMetrics(res)
@@ -320,23 +268,23 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_gateway_router_transaction").Add(float64(len(res.RevertedTxs)))
for _, depositMessage := range res.DepositMessages {
switch btypes.TokenType(depositMessage.TokenType) {
case btypes.TokenTypeETH:
switch orm.TokenType(depositMessage.TokenType) {
case orm.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_eth").Add(1)
case btypes.TokenTypeERC20:
case orm.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc20").Add(1)
case btypes.TokenTypeERC721:
case orm.TokenTypeERC721:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc721").Add(1)
case btypes.TokenTypeERC1155:
case orm.TokenTypeERC1155:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch btypes.TxStatusType(relayedMessage.TxStatus) {
case btypes.TxStatusTypeRelayed:
switch orm.TxStatusType(relayedMessage.TxStatus) {
case orm.TxStatusTypeRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_relayed_message").Add(1)
case btypes.TxStatusTypeFailedRelayed:
case orm.TxStatusTypeFailedRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_relayed_message").Add(1)
}
// Have not tracked L1 relayed message reverted transaction yet.
@@ -345,37 +293,24 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
}
for _, batchEvent := range res.BatchEvents {
switch btypes.BatchStatusType(batchEvent.BatchStatus) {
case btypes.BatchStatusTypeCommitted:
switch orm.BatchStatusType(batchEvent.BatchStatus) {
case orm.BatchStatusTypeCommitted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_commit_batch_event").Add(1)
case btypes.BatchStatusTypeReverted:
case orm.BatchStatusTypeReverted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_revert_batch_event").Add(1)
case btypes.BatchStatusTypeFinalized:
case orm.BatchStatusTypeFinalized:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_finalize_batch_event").Add(1)
}
}
for _, messageQueueEvent := range res.MessageQueueEvents {
switch messageQueueEvent.EventType {
case btypes.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
case orm.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_replay_message_or_enforced_transaction").Add(1)
case btypes.MessageQueueEventTypeDequeueTransaction:
case orm.MessageQueueEventTypeDequeueTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
case btypes.MessageQueueEventTypeDropTransaction:
case orm.MessageQueueEventTypeDropTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
// one ResetDequeuedTransaction event could indicate resetting multiple skipped messages,
// this metric only counts the number of events, not the number of skipped messages.
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_reset_skipped_messages").Add(1)
}
}
for _, bridgeBatchDepositEvent := range res.BridgeBatchDepositEvents {
switch btypes.TokenType(bridgeBatchDepositEvent.TokenType) {
case btypes.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_eth").Add(1)
case btypes.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_erc20").Add(1)
}
}
}

View File

@@ -1,101 +1,26 @@
package logic
import (
"context"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// L2EventParser the L2 event parser
type L2EventParser struct {
cfg *config.FetcherConfig
client *ethclient.Client
}
// NewL2EventParser creates the L2 event parser
func NewL2EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L2EventParser {
return &L2EventParser{
cfg: cfg,
client: client,
}
func NewL2EventParser() *L2EventParser {
return &L2EventParser{}
}
// ParseL2EventLogs parses L2 watched events
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
l2WithdrawMessages, l2RelayedMessages, err := e.ParseL2SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
l2BridgeBatchDepositMessages, err := e.ParseL2BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
return l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, nil
}
// ParseL2BridgeBatchDepositCrossChainEventLogs parses L2 watched bridge batch deposit events
func (e *L2EventParser) ParseL2BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
var l2BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2BridgeBatchDistributeSig:
event := backendabi.L2BatchBridgeGatewayBatchDistribute{}
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "BatchDistribute", vlog)
if err != nil {
log.Error("Failed to unpack BatchDistribute event", "err", err)
return nil, err
}
var tokenType btypes.TokenType
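// A zero L1 token address marks an ETH batch deposit; any non-zero address is treated as an ERC20 deposit.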
if event.L1Token == common.HexToAddress("0") {
tokenType = btypes.TokenTypeETH
} else {
tokenType = btypes.TokenTypeERC20
}
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
TokenType: int(tokenType),
BatchIndex: event.BatchIndex.Uint64(),
L2TokenAddress: event.L2Token.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDistribute),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
})
case backendabi.L2BridgeBatchDistributeFailedSig:
event := backendabi.L2BatchBridgeGatewayDistributeFailed{}
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "DistributeFailed", vlog)
if err != nil {
log.Error("Failed to unpack DistributeFailed event", "err", err)
return nil, err
}
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
BatchIndex: event.BatchIndex.Uint64(),
L2TokenAddress: event.L2Token.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDistributeFailed),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
Sender: event.Receiver.String(),
})
}
}
return l2BridgeBatchDepositEvents, nil
}
// ParseL2SingleCrossChainEventLogs parses L2 watched events
func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
// ParseL2EventLogs parses L2 watched events
func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l2WithdrawMessages []*orm.CrossMessage
var l2RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
@@ -104,25 +29,25 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
event := backendabi.ETHMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawETH event", "err", err)
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeETH)
lastMessage.TokenType = int(orm.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC20 event", "err", err)
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC20)
lastMessage.TokenType = int(orm.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
@@ -130,13 +55,13 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC721 event", "err", err)
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -144,13 +69,13 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
event := backendabi.BatchERC721MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Error("Failed to unpack BatchWithdrawERC721 event", "err", err)
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -158,13 +83,13 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC1155 event", "err", err)
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -173,13 +98,13 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
event := backendabi.BatchERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Error("Failed to unpack BatchWithdrawERC1155 event", "err", err)
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -188,19 +113,14 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
event := backendabi.L2SentMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Error("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
from, err := getRealFromAddress(ctx, event.Sender, event.Message, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
if err != nil {
log.Error("Failed to get real 'from' address", "err", err)
log.Warn("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
l2WithdrawMessages = append(l2WithdrawMessages, &orm.CrossMessage{
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
Sender: from,
Sender: event.Sender.String(),
Receiver: event.Target.String(),
TokenType: int(btypes.TokenTypeETH),
TokenType: int(orm.TokenTypeETH),
L2TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageFrom: event.Sender.String(),
@@ -208,8 +128,8 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
MessageValue: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageData: hexutil.Encode(event.Message),
MessageType: int(btypes.MessageTypeL2SentMessage),
TxStatus: int(btypes.TxStatusTypeSent),
MessageType: int(orm.MessageTypeL2SentMessage),
TxStatus: int(orm.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L2BlockNumber: vlog.BlockNumber,
})
@@ -217,29 +137,29 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Error("Failed to unpack RelayedMessage event", "err", err)
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return nil, nil, err
}
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeRelayed),
MessageType: int(btypes.MessageTypeL1SentMessage),
TxStatus: int(orm.TxStatusTypeRelayed),
MessageType: int(orm.MessageTypeL1SentMessage),
})
case backendabi.L2FailedRelayedMessageEventSig:
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "FailedRelayedMessage", vlog)
if err != nil {
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
return nil, nil, err
}
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
MessageType: int(btypes.MessageTypeL1SentMessage),
TxStatus: int(orm.TxStatusTypeFailedRelayed),
MessageType: int(orm.MessageTypeL1SentMessage),
})
}
}

View File

@@ -17,7 +17,6 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -27,18 +26,16 @@ const L2ReorgSafeDepth = 256
// L2FilterResult the L2 filter result
type L2FilterResult struct {
WithdrawMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
BridgeBatchDepositMessage []*orm.BridgeBatchDepositEvent
WithdrawMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
}
// L2FetcherLogic the L2 fetcher logic
type L2FetcherLogic struct {
cfg *config.FetcherConfig
cfg *config.LayerConfig
client *ethclient.Client
addressList []common.Address
gatewayList []common.Address
parser *L2EventParser
db *gorm.DB
crossMessageOrm *orm.CrossMessage
@@ -48,12 +45,13 @@ type L2FetcherLogic struct {
}
// NewL2FetcherLogic creates L2 fetcher logic
func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -62,48 +60,16 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.MessengerAddr),
}
gatewayList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional gateways.
// Optional erc20 gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
}
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList)
f := &L2FetcherLogic{
db: db,
@@ -112,8 +78,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
cfg: cfg,
client: client,
addressList: addressList,
gatewayList: gatewayList,
parser: NewL2EventParser(cfg, client),
parser: NewL2EventParser(),
}
reg := prometheus.DefaultRegisterer
@@ -162,7 +127,42 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
if tx.IsL1MessageTx() {
txTo := tx.To()
if txTo == nil {
continue
}
toAddress := txTo.String()
// GatewayRouter: L2 withdrawal.
if toAddress == f.cfg.GatewayRouterAddr {
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, nil, receiptErr
}
// Check if the transaction failed
if receipt.Status == types.ReceiptStatusFailed {
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, signerErr := signer.Sender(tx)
if signerErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
return nil, nil, nil, signerErr
}
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
L2TxHash: tx.Hash().String(),
MessageType: int(orm.MessageTypeL2SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L2BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
})
}
}
if tx.Type() == types.L1MessageTxType {
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
@@ -174,43 +174,11 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
l2RevertedRelayedMessageTxs = append(l2RevertedRelayedMessageTxs, &orm.CrossMessage{
MessageHash: common.BytesToHash(crypto.Keccak256(tx.AsL1MessageTx().Data)).String(),
L2TxHash: tx.Hash().String(),
TxStatus: int(btypes.TxStatusTypeRelayTxReverted),
TxStatus: int(orm.TxStatusTypeRelayTxReverted),
L2BlockNumber: receipt.BlockNumber.Uint64(),
MessageType: int(btypes.MessageTypeL1SentMessage),
MessageType: int(orm.MessageTypeL1SentMessage),
})
}
continue
}
// Gateways: L2 withdrawal.
if !isTransactionToGateway(tx, f.gatewayList) {
continue
}
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, nil, receiptErr
}
// Check if the transaction failed
if receipt.Status == types.ReceiptStatusFailed {
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, signerErr := signer.Sender(tx)
if signerErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
return nil, nil, nil, signerErr
}
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
L2TxHash: tx.Hash().String(),
MessageType: int(btypes.MessageTypeL2SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L2BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
})
}
}
}
@@ -224,7 +192,7 @@ func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]
Addresses: f.addressList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 9)
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2WithdrawERC721Sig
@@ -232,8 +200,6 @@ func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][4] = backendabi.L2SentMessageEventSig
query.Topics[0][5] = backendabi.L2RelayedMessageEventSig
query.Topics[0][6] = backendabi.L2FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L2BridgeBatchDistributeSig
query.Topics[0][8] = backendabi.L2BridgeBatchDistributeFailedSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
@@ -269,17 +235,16 @@ func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
res := L2FilterResult{
WithdrawMessages: l2WithdrawMessages,
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
OtherRevertedTxs: revertedUserTxs,
BridgeBatchDepositMessage: l2BridgeBatchDepositMessages,
WithdrawMessages: l2WithdrawMessages,
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
OtherRevertedTxs: revertedUserTxs,
}
f.updateMetrics(res)
@@ -291,47 +256,26 @@ func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_gateway_router_transaction").Add(float64(len(res.OtherRevertedTxs)))
for _, withdrawMessage := range res.WithdrawMessages {
switch btypes.TokenType(withdrawMessage.TokenType) {
case btypes.TokenTypeETH:
switch orm.TokenType(withdrawMessage.TokenType) {
case orm.TokenTypeETH:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_eth").Add(1)
case btypes.TokenTypeERC20:
case orm.TokenTypeERC20:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc20").Add(1)
case btypes.TokenTypeERC721:
case orm.TokenTypeERC721:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc721").Add(1)
case btypes.TokenTypeERC1155:
case orm.TokenTypeERC1155:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch btypes.TxStatusType(relayedMessage.TxStatus) {
case btypes.TxStatusTypeRelayed:
switch orm.TxStatusType(relayedMessage.TxStatus) {
case orm.TxStatusTypeRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_relayed_message").Add(1)
case btypes.TxStatusTypeFailedRelayed:
case orm.TxStatusTypeFailedRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_relayed_message").Add(1)
case btypes.TxStatusTypeRelayTxReverted:
case orm.TxStatusTypeRelayTxReverted:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_reverted_relayed_message_transaction").Add(1)
}
}
for _, bridgeBatchDepositMessage := range res.BridgeBatchDepositMessage {
switch btypes.TxStatusType(bridgeBatchDepositMessage.TxStatus) {
case btypes.TxStatusBridgeBatchDistribute:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_message").Add(1)
case btypes.TxStatusBridgeBatchDistributeFailed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_failed_message").Add(1)
}
}
}
func isTransactionToGateway(tx *types.Transaction, gatewayList []common.Address) bool {
if tx.To() == nil {
return false
}
for _, gateway := range gatewayList {
if *tx.To() == gateway {
return true
}
}
return false
}

View File

@@ -2,14 +2,30 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
btypes "scroll-tech/bridge-history-api/internal/types"
// BatchStatusType represents the type of batch status.
type BatchStatusType int
// Constants for BatchStatusType.
const (
BatchStatusTypeUnknown BatchStatusType = iota
BatchStatusTypeCommitted
BatchStatusTypeReverted
BatchStatusTypeFinalized
)
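// Typical lifecycle as handled by InsertOrUpdateBatchEvents below: a batch is first Committed, then either Finalized or Reverted.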
// UpdateStatusType represents whether the batch info has been updated in the message table.
type UpdateStatusType int
// Constants for UpdateStatusType.
const (
UpdateStatusTypeUnupdated UpdateStatusType = iota
UpdateStatusTypeUpdated
)
// BatchEvent represents a batch event.
@@ -46,7 +62,7 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
db = db.Model(&BatchEvent{})
db = db.Order("l1_block_number desc")
if err := db.First(&batch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
@@ -54,71 +70,50 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
return batch.L1BlockNumber, nil
}
// GetLastUpdatedFinalizedBlockHeight returns the last updated finalized block height in db.
func (c *BatchEvent) GetLastUpdatedFinalizedBlockHeight(ctx context.Context) (uint64, error) {
var batch BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUpdated)
db = db.Order("batch_index desc")
if err := db.First(&batch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
// No finalized batch found, return genesis batch's end block number.
return 0, nil
}
return 0, fmt.Errorf("failed to get last updated finalized block height, error: %w", err)
}
return batch.EndBlockNumber, nil
}
// GetUnupdatedFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
// GetFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
var batches []*BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("end_block_number <= ?", blockHeight)
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
db = db.Where("batch_status = ?", BatchStatusTypeFinalized)
db = db.Where("update_status = ?", UpdateStatusTypeUnupdated)
db = db.Order("batch_index asc")
if err := db.Find(&batches).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get unupdated finalized batches >= block height, error: %w", err)
return nil, fmt.Errorf("failed to get batches >= block height, error: %w", err)
}
return batches, nil
}
// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent) error {
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent, dbTX ...*gorm.DB) error {
for _, l1BatchEvent := range l1BatchEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&BatchEvent{})
updateFields := make(map[string]interface{})
switch btypes.BatchStatusType(l1BatchEvent.BatchStatus) {
case btypes.BatchStatusTypeCommitted:
// Use the clause to either insert or ignore on conflict
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "batch_hash"}},
DoNothing: true,
})
switch BatchStatusType(l1BatchEvent.BatchStatus) {
case BatchStatusTypeCommitted:
if err := db.Create(l1BatchEvent).Error; err != nil {
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
return fmt.Errorf("failed to insert batch event, error: %w", err)
}
case btypes.BatchStatusTypeFinalized:
case BatchStatusTypeFinalized:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
updateFields["l1_block_number"] = l1BatchEvent.L1BlockNumber
updateFields["batch_status"] = BatchStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
case btypes.BatchStatusTypeReverted:
case BatchStatusTypeReverted:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_status != ?", btypes.BatchStatusTypeFinalized)
updateFields["batch_status"] = btypes.BatchStatusTypeReverted
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = BatchStatusTypeReverted
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
@@ -137,7 +132,7 @@ func (c *BatchEvent) UpdateBatchEventStatus(ctx context.Context, batchIndex uint
db = db.Model(&BatchEvent{})
db = db.Where("batch_index = ?", batchIndex)
updateFields := map[string]interface{}{
"update_status": btypes.UpdateStatusTypeUpdated,
"update_status": UpdateStatusTypeUpdated,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event status, batchIndex: %d, error: %w", batchIndex, err)

View File

@@ -1,163 +0,0 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/bridge-history-api/internal/types"
)
// BridgeBatchDepositEvent represents the bridge batch deposit event.
type BridgeBatchDepositEvent struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id;primary_key"`
TokenType int `json:"token_type" gorm:"column:token_type"`
Sender string `json:"sender" gorm:"column:sender"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
TokenAmount string `json:"token_amount" gorm:"column:token_amount"`
Fee string `json:"fee" gorm:"column:fee"`
L1TokenAddress string `json:"l1_token_address" gorm:"column:l1_token_address"`
L2TokenAddress string `json:"l2_token_address" gorm:"column:l2_token_address"`
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
L2BlockNumber uint64 `json:"l2_block_number" gorm:"column:l2_block_number"`
L1TxHash string `json:"l1_tx_hash" gorm:"column:l1_tx_hash"`
L1LogIndex uint `json:"l1_log_index" gorm:"column:l1_log_index"`
L2TxHash string `json:"l2_tx_hash" gorm:"column:l2_tx_hash"`
TxStatus int `json:"tx_status" gorm:"column:tx_status"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"column:block_timestamp"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the BridgeBatchDepositEvent model.
func (*BridgeBatchDepositEvent) TableName() string {
return "bridge_batch_deposit_event_v2"
}
// NewBridgeBatchDepositEvent returns a new instance of BridgeBatchDepositEvent.
func NewBridgeBatchDepositEvent(db *gorm.DB) *BridgeBatchDepositEvent {
return &BridgeBatchDepositEvent{db: db}
}
// GetTxsByAddress returns the txs by address
func (c *BridgeBatchDepositEvent) GetTxsByAddress(ctx context.Context, sender string) ([]*BridgeBatchDepositEvent, error) {
var messages []*BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get all txs by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// GetMessagesByTxHashes retrieves all BridgeBatchDepositEvent from the database that match the provided transaction hashes.
func (c *BridgeBatchDepositEvent) GetMessagesByTxHashes(ctx context.Context, txHashes []string) ([]*BridgeBatchDepositEvent, error) {
var messages []*BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("l1_tx_hash in (?) or l2_tx_hash in (?)", txHashes, txHashes)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to GetMessagesByTxHashes by tx hashes, tx hashes: %v, error: %w", txHashes, err)
}
return messages, nil
}
// GetMessageL1SyncedHeightInDB returns the latest L1 bridge batch deposit message height from the database
func (c *BridgeBatchDepositEvent) GetMessageL1SyncedHeightInDB(ctx context.Context) (uint64, error) {
var message BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Order("l1_block_number desc")
err := db.First(&message).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to get l1 latest processed height, error: %w", err)
}
return message.L1BlockNumber, nil
}
// GetMessageL2SyncedHeightInDB returns the latest L2 bridge batch deposit message height from the database
func (c *BridgeBatchDepositEvent) GetMessageL2SyncedHeightInDB(ctx context.Context) (uint64, error) {
var message BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Order("l2_block_number desc")
err := db.First(&message).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to get l2 latest processed height, error: %w", err)
}
return message.L2BlockNumber, nil
}
// InsertOrUpdateL1BridgeBatchDepositEvent inserts or updates a new L1 BridgeBatchDepositEvent
func (c *BridgeBatchDepositEvent) InsertOrUpdateL1BridgeBatchDepositEvent(ctx context.Context, l1BatchDepositEvents []*BridgeBatchDepositEvent) error {
if len(l1BatchDepositEvents) == 0 {
return nil
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "l1_tx_hash"}, {Name: "l1_log_index"}},
DoUpdates: clause.AssignmentColumns([]string{"token_amount", "fee", "l1_block_number", "l1_token_address", "tx_status", "block_timestamp"}),
})
if err := db.Create(l1BatchDepositEvents).Error; err != nil {
return fmt.Errorf("failed to insert message, error: %w", err)
}
return nil
}
// UpdateBatchEventStatus updates the tx_status of BridgeBatchDepositEvent given batch index
func (c *BridgeBatchDepositEvent) UpdateBatchEventStatus(ctx context.Context, distributeMessage *BridgeBatchDepositEvent) error {
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("batch_index = ?", distributeMessage.BatchIndex)
db = db.Where("token_type = ?", distributeMessage.TokenType)
updateFields := map[string]interface{}{
"l2_token_address": distributeMessage.L2TokenAddress,
"l2_block_number": distributeMessage.L2BlockNumber,
"l2_tx_hash": distributeMessage.L2TxHash,
"tx_status": types.TxStatusBridgeBatchDistribute,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to UpdateBatchEventStatus, batchIndex: %d, error: %w", distributeMessage.BatchIndex, err)
}
return nil
}
// UpdateDistributeFailedStatus updates the tx_status of BridgeBatchDepositEvent given batch index and senders
func (c *BridgeBatchDepositEvent) UpdateDistributeFailedStatus(ctx context.Context, batchIndex uint64, senders []string) error {
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("batch_index = ?", batchIndex)
db = db.Where("sender in (?)", senders)
updateFields := map[string]interface{}{
"tx_status": types.TxStatusBridgeBatchDistributeFailed,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to UpdateDistributeFailedStatus, batchIndex: %d, senders:%v, error: %w", batchIndex, senders, err)
}
return nil
}

View File

@@ -2,22 +2,82 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
"github.com/google/uuid"
"github.com/scroll-tech/go-ethereum/common"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
"scroll-tech/bridge-history-api/internal/types"
// TokenType represents the type of token.
type TokenType int
btypes "scroll-tech/bridge-history-api/internal/types"
// Constants for TokenType.
const (
TokenTypeUnknown TokenType = iota
TokenTypeETH
TokenTypeERC20
TokenTypeERC721
TokenTypeERC1155
)
// MessageType represents the type of message.
type MessageType int
// Constants for MessageType.
const (
MessageTypeUnknown MessageType = iota
MessageTypeL1SentMessage
MessageTypeL2SentMessage
)
// TxStatusType represents the status of a transaction.
type TxStatusType int
// Constants for TxStatusType.
const (
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
// from a later status (e.g., relayed) back to "sent".
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // Message hash is not tracked, thus it will not be processed again.
TxStatusTypeRelayed // Terminal status.
// FailedRelayedMessage event: encoded tx failed, cannot retry. e.g., https://sepolia.scrollscan.com/tx/0xfc7d3ea5ec8dc9b664a5a886c3b33d21e665355057601033481a439498efb79a
TxStatusTypeFailedRelayed // Terminal status.
// In some cases, user can retry with a larger gas limit. e.g., https://sepolia.scrollscan.com/tx/0x7323a7ba29492cb47d92206411be99b27896f2823cee0633a596b646b73f1b5b
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
)
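The comments above spell out the core invariant of this status machine: a later event must never overwrite a terminal status. Below is a minimal sketch of such a guarded transition written with gorm; it is illustrative only (not part of this change), assumes it sits next to the CrossMessage model in this package, and uses the standard context and gorm.io/gorm imports. The function name markSkippedSketch is made up for the example.
// Sketch: mark an L1 message as skipped without ever downgrading a terminal status.
func markSkippedSketch(ctx context.Context, db *gorm.DB, queueIndex uint64) error {
    return db.WithContext(ctx).
        Model(&CrossMessage{}).
        Where("message_nonce = ?", queueIndex).
        Where("message_type = ?", MessageTypeL1SentMessage).
        // relayed, failed relayed and dropped are terminal and are never overwritten.
        Where("tx_status NOT IN ?", []TxStatusType{TxStatusTypeRelayed, TxStatusTypeFailedRelayed, TxStatusTypeDropped}).
        Update("tx_status", TxStatusTypeSkipped).Error
}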
// RollupStatusType represents the status of a rollup.
type RollupStatusType int
// Constants for RollupStatusType.
const (
RollupStatusTypeUnknown RollupStatusType = iota
RollupStatusTypeFinalized // only batch finalized status is used.
)
// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int
// Constants for MessageQueueEventType.
const (
MessageQueueEventTypeUnknown MessageQueueEventType = iota
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
)
// MessageQueueEvent struct represents the details of a batch event.
type MessageQueueEvent struct {
EventType btypes.MessageQueueEventType
EventType MessageQueueEventType
QueueIndex uint64
// Track replay tx hash and refund tx hash.
@@ -73,27 +133,27 @@ func NewCrossMessage(db *gorm.DB) *CrossMessage {
}
// GetMessageSyncedHeightInDB returns the latest synced cross message height from the database for a given message type.
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType btypes.MessageType) (uint64, error) {
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType MessageType) (uint64, error) {
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", messageType)
switch {
case messageType == btypes.MessageTypeL1SentMessage:
case messageType == MessageTypeL1SentMessage:
db = db.Order("l1_block_number desc")
case messageType == btypes.MessageTypeL2SentMessage:
case messageType == MessageTypeL2SentMessage:
db = db.Order("l2_block_number desc")
}
if err := db.First(&message).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
}
switch {
case messageType == btypes.MessageTypeL1SentMessage:
case messageType == MessageTypeL1SentMessage:
return message.L1BlockNumber, nil
case messageType == btypes.MessageTypeL2SentMessage:
case messageType == MessageTypeL2SentMessage:
return message.L2BlockNumber, nil
default:
return 0, fmt.Errorf("invalid message type: %v", messageType)
@@ -105,11 +165,11 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("rollup_status = ?", RollupStatusTypeFinalized)
db = db.Order("message_nonce desc")
if err := db.First(&message).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
@@ -124,14 +184,14 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
db = db.Model(&CrossMessage{})
db = db.Where("l2_block_number >= ?", startBlock)
db = db.Where("l2_block_number <= ?", endBlock)
db = db.Where("tx_status != ?", types.TxStatusTypeSentTxReverted)
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status != ?", TxStatusTypeSentTxReverted)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Order("message_nonce asc")
if err := db.Find(&messages).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get L2 withdrawals by block range, error: %v", err)
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
}
return messages, nil
}
@@ -153,11 +213,11 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", TxStatusTypeSent)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(10000)
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
}
@@ -169,7 +229,7 @@ func (c *CrossMessage) GetL2WithdrawalsByAddress(ctx context.Context, sender str
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
@@ -194,53 +254,23 @@ func (c *CrossMessage) GetTxsByAddress(ctx context.Context, sender string) ([]*C
}
// UpdateL1MessageQueueEventsInfo updates the information about L1 message queue events in the database.
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent) error {
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent, dbTX ...*gorm.DB) error {
// update tx statuses.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeFailedRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
txStatusUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeQueueTransaction:
continue
case btypes.MessageQueueEventTypeDequeueTransaction:
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSkipped
case btypes.MessageQueueEventTypeDropTransaction:
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
db = db.Where("tx_status = ?", types.TxStatusTypeSkipped)
// reset skipped messages whose nonce is greater than or equal to the queue index.
db = db.Where("message_nonce >= ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSent
}
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
}
}
// update tx hashes of replay and refund.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeDequeueTransaction, btypes.MessageQueueEventTypeResetDequeuedTransaction:
continue
case btypes.MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs (whose message hashes would not be found), sendMessages have been filtered out.
case MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs (whose message hashes would not be found), sentMessages have been filtered out.
// replayMessage case:
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
@@ -250,14 +280,45 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
// because in replayMessage, queue index != message nonce.
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
case btypes.MessageQueueEventTypeDropTransaction:
txStatusUpdateFields["tx_status"] = TxStatusTypeSent // reset status to "sent".
case MessageQueueEventTypeDequeueTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
case MessageQueueEventTypeDropTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
}
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
}
}
// update tx hashes of replay and refund.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs (whose message hashes would not be found), sentMessages have been filtered out.
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
case MessageQueueEventTypeDropTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
}
if err := db.Updates(txHashUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
// Check if there are fields to update to avoid empty update operation (skip message).
if len(txHashUpdateFields) > 0 {
if err := db.Updates(txHashUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
}
}
}
return nil
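Several writers in this file now take an optional trailing dbTX ...*gorm.DB argument (see the signatures above and below), so related writes can share one database transaction. A hedged usage sketch follows; saveL1EventsSketch and its variable names are illustrative and not taken from this diff, and the usual context and gorm.io/gorm imports are assumed.
// Sketch: group message upserts and queue-event updates into a single transaction
// by passing the outer *gorm.DB transaction through the dbTX parameter.
func saveL1EventsSketch(ctx context.Context, db *gorm.DB, c *CrossMessage, msgs []*CrossMessage, evts []*MessageQueueEvent) error {
    return db.Transaction(func(tx *gorm.DB) error {
        if err := c.InsertOrUpdateL1Messages(ctx, msgs, tx); err != nil {
            return err
        }
        return c.UpdateL1MessageQueueEventsInfo(ctx, evts, tx)
    })
}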
@@ -267,12 +328,12 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
func (c *CrossMessage) UpdateBatchStatusOfL2Withdrawals(ctx context.Context, startBlockNumber, endBlockNumber, batchIndex uint64) error {
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("l2_block_number >= ?", startBlockNumber)
db = db.Where("l2_block_number <= ?", endBlockNumber)
updateFields := make(map[string]interface{})
updateFields["batch_index"] = batchIndex
updateFields["rollup_status"] = btypes.RollupStatusTypeFinalized
updateFields["rollup_status"] = RollupStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch status of L2 sent messages, start: %v, end: %v, index: %v, error: %w", startBlockNumber, endBlockNumber, batchIndex, err)
}
@@ -301,11 +362,14 @@ func (c *CrossMessage) UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx c
}
// InsertOrUpdateL1Messages inserts or updates a list of L1 cross messages into the database.
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage) error {
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
@@ -320,14 +384,18 @@ func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []
}
// InsertOrUpdateL2Messages inserts or updates a list of L2 cross messages into the database.
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage) error {
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
// The merkle_proof is updated separately in batch status updates and hence is not included here.
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l2_block_number", "l2_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_from", "message_to", "message_value", "message_data", "message_nonce"}),
@@ -338,60 +406,31 @@ func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []
return nil
}
// InsertFailedL2GatewayTxs inserts a list of transactions that failed to interact with the L2 gateways into the database.
// To resolve unique index conflicts, the L2 tx hash is used as the MessageHash.
// The OnConflict clause is used to prevent inserting same failed transactions multiple times.
func (c *CrossMessage) InsertFailedL2GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
// InsertFailedGatewayRouterTxs inserts a list of transactions that failed to interact with the gateway router into the database.
// These failed transactions are only fetched once, so they are inserted without checking for duplicates.
// To resolve unique index conflicts, a random UUID will be generated and used as the MessageHash.
func (c *CrossMessage) InsertFailedGatewayRouterTxs(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
for _, message := range messages {
message.MessageHash = message.L2TxHash
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoNothing: true,
})
if err := db.Create(&messages).Error; err != nil {
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
}
return nil
}
// InsertFailedL1GatewayTxs inserts a list of transactions that failed to interact with the L1 gateways into the database.
// To resolve unique index conflicts, the L1 tx hash is used as the MessageHash.
// The OnConflict clause is used to prevent inserting same failed transactions multiple times.
func (c *CrossMessage) InsertFailedL1GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
if len(messages) == 0 {
return nil
}
for _, message := range messages {
message.MessageHash = message.L1TxHash
message.MessageHash = uuid.New().String()
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoNothing: true,
})
if err := db.Create(&messages).Error; err != nil {
if err := db.Create(messages).Error; err != nil {
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
}
return nil
}
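Reusing the transaction hash as the MessageHash, combined with an ON CONFLICT DO NOTHING clause on the message_hash unique index, is what makes these inserts idempotent: re-processing the same failed transaction becomes a no-op. A small sketch of that idiom, illustrative only and reusing the CrossMessage model from this package (context, gorm.io/gorm and gorm.io/gorm/clause imports assumed):
// Sketch: inserting the same failed L1 tx twice leaves exactly one row.
func insertFailedTxSketch(ctx context.Context, db *gorm.DB, m *CrossMessage) error {
    m.MessageHash = m.L1TxHash // the tx hash doubles as the unique message hash
    return db.WithContext(ctx).
        Model(&CrossMessage{}).
        Clauses(clause.OnConflict{
            Columns:   []clause.Column{{Name: "message_hash"}},
            DoNothing: true,
        }).
        Create(m).Error
}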
// InsertOrUpdateL2RelayedMessagesOfL1Deposits inserts or updates the database with a list of L2 relayed messages related to L1 deposits.
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage) error {
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(l2RelayedMessages) == 0 {
return nil
}
@@ -409,7 +448,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
mergedL2RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l2RelayedMessages {
if existing, found := mergedL2RelayedMessages[message.MessageHash]; found {
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
mergedL2RelayedMessages[message.MessageHash] = message
}
} else {
@@ -420,7 +459,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
for _, msg := range mergedL2RelayedMessages {
uniqueL2RelayedMessages = append(uniqueL2RelayedMessages, msg)
}
// Do not update tx status of successfully relayed messages,
// Do not update tx status of successfully or failed relayed messages,
// because if a message is handled, the later relayed message tx would be reverted.
// ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L2/L2ScrollMessenger.sol#L102
// e.g.,
@@ -436,8 +475,9 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
),
},
},
@@ -449,7 +489,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
}
// InsertOrUpdateL1RelayedMessagesOfL2Withdrawals inserts or updates the database with a list of L1 relayed messages related to L2 withdrawals.
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage) error {
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(l1RelayedMessages) == 0 {
return nil
}
@@ -467,7 +507,7 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
mergedL1RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l1RelayedMessages {
if existing, found := mergedL1RelayedMessages[message.MessageHash]; found {
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
mergedL1RelayedMessages[message.MessageHash] = message
}
} else {
@@ -479,6 +519,9 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
uniqueL1RelayedMessages = append(uniqueL1RelayedMessages, msg)
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
@@ -488,8 +531,9 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
),
},
},

View File

@@ -15,7 +15,6 @@ CREATE TABLE batch_event_v2
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS unique_idx_be_batch_hash ON batch_event_v2 (batch_hash);
CREATE INDEX IF NOT EXISTS idx_be_l1_block_number ON batch_event_v2 (l1_block_number);
CREATE INDEX IF NOT EXISTS idx_be_batch_index ON batch_event_v2 (batch_index);
CREATE INDEX IF NOT EXISTS idx_be_batch_index_batch_hash ON batch_event_v2 (batch_index, batch_hash);

View File

@@ -1,38 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE bridge_batch_deposit_event_v2
(
id BIGSERIAL PRIMARY KEY,
token_type SMALLINT NOT NULL,
sender VARCHAR NOT NULL,
batch_index BIGINT DEFAULT NULL,
token_amount VARCHAR NOT NULL,
fee VARCHAR NOT NULL,
l1_token_address VARCHAR DEFAULT NULL,
l2_token_address VARCHAR DEFAULT NULL,
l1_block_number BIGINT DEFAULT NULL,
l2_block_number BIGINT DEFAULT NULL,
l1_tx_hash VARCHAR DEFAULT NULL,
l1_log_index INTEGER DEFAULT NULL,
l2_tx_hash VARCHAR DEFAULT NULL,
tx_status SMALLINT NOT NULL,
block_timestamp BIGINT NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX idx_l1hash_l1logindex ON bridge_batch_deposit_event_v2 (l1_tx_hash, l1_log_index);
CREATE INDEX IF NOT EXISTS idx_bbde_batchidx_sender ON bridge_batch_deposit_event_v2 (batch_index, sender);
CREATE INDEX IF NOT EXISTS idx_bbde_l1_block_number ON bridge_batch_deposit_event_v2 (l1_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l2_block_number ON bridge_batch_deposit_event_v2 (l2_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l1_tx_hash ON bridge_batch_deposit_event_v2 (l1_tx_hash DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l2_tx_hash ON bridge_batch_deposit_event_v2 (l2_tx_hash DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_sender_block_timestamp ON bridge_batch_deposit_event_v2 (sender, block_timestamp DESC);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS bridge_batch_deposit_event_v2;
-- +goose StatementEnd

View File

@@ -27,9 +27,9 @@ func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
r := router.Group("api/")
r.GET("/txs", api.TxsByAddressCtl.GetTxsByAddress)
r.GET("/l2/withdrawals", api.L2WithdrawalsByAddressCtl.GetL2WithdrawalsByAddress)
r.GET("/l2/unclaimed/withdrawals", api.L2UnclaimedWithdrawalsByAddressCtl.GetL2UnclaimedWithdrawalsByAddress)
r.GET("/txs", api.HistoryCtrler.GetTxsByAddress)
r.GET("/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
r.GET("/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
r.POST("/txsbyhashes", api.TxsByHashesCtl.PostQueryTxsByHashes)
r.POST("/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
}
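With the group prefix above, the handlers are exposed under /api/, e.g. /api/txs and /api/l2/unclaimed/withdrawals. The sketch below shows a client call; the host, port and the "address" query-parameter name are assumptions for illustration and are not taken from this diff (net/http, io and fmt imports assumed).
// Sketch: querying the history API once the routes above are registered.
func queryTxsSketch() error {
    // "address" is an assumed parameter name; adjust to the handler's actual contract.
    resp, err := http.Get("http://localhost:8080/api/txs?address=0x0000000000000000000000000000000000000000")
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return err
    }
    fmt.Println(string(body))
    return nil
}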

View File

@@ -1,94 +0,0 @@
package types
// TxStatusType represents the status of a transaction.
type TxStatusType int
// Constants for TxStatusType.
const (
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
// from a later status (e.g., relayed) back to "sent".
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // Message hash is not tracked, thus it will not be processed again.
TxStatusTypeRelayed // Terminal status.
// TxStatusTypeFailedRelayed Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeFailedRelayed
// TxStatusTypeRelayTxReverted Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
// TxStatusBridgeBatchDeposit indicates the user has deposited tokens to the bridge batch deposit contract
TxStatusBridgeBatchDeposit
// TxStatusBridgeBatchDistribute indicates the bridge batch deposit contract successfully distributed tokens to the user
TxStatusBridgeBatchDistribute
// TxStatusBridgeBatchDistributeFailed indicates the bridge batch deposit contract failed to distribute tokens to the user
TxStatusBridgeBatchDistributeFailed
)
// TokenType represents the type of token.
type TokenType int
// Constants for TokenType.
const (
TokenTypeUnknown TokenType = iota
TokenTypeETH
TokenTypeERC20
TokenTypeERC721
TokenTypeERC1155
)
// MessageType represents the type of message.
type MessageType int
// Constants for MessageType.
const (
MessageTypeUnknown MessageType = iota
MessageTypeL1SentMessage
MessageTypeL2SentMessage
MessageTypeL1BatchDeposit
)
// RollupStatusType represents the status of a rollup.
type RollupStatusType int
// Constants for RollupStatusType.
const (
RollupStatusTypeUnknown RollupStatusType = iota
RollupStatusTypeFinalized // only batch finalized status is used.
)
// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int
// Constants for MessageQueueEventType.
const (
MessageQueueEventTypeUnknown MessageQueueEventType = iota
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
MessageQueueEventTypeResetDequeuedTransaction
)
// BatchStatusType represents the type of batch status.
type BatchStatusType int
// Constants for BatchStatusType.
const (
BatchStatusTypeUnknown BatchStatusType = iota
BatchStatusTypeCommitted
BatchStatusTypeReverted
BatchStatusTypeFinalized
)
// UpdateStatusType represents whether the batch info has been updated in the message table.
type UpdateStatusType int
// Constants for UpdateStatusType.
const (
UpdateStatusTypeUnupdated UpdateStatusType = iota
UpdateStatusTypeUpdated
)

View File

@@ -4,6 +4,8 @@ import (
"net/http"
"github.com/gin-gonic/gin"
"scroll-tech/bridge-history-api/internal/orm"
)
const (
@@ -77,18 +79,17 @@ type TxHistoryInfo struct {
ReplayTxHash string `json:"replay_tx_hash"`
RefundTxHash string `json:"refund_tx_hash"`
MessageHash string `json:"message_hash"`
TokenType TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
TokenType orm.TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
TokenIDs []string `json:"token_ids"` // only for erc721 and erc1155
TokenAmounts []string `json:"token_amounts"` // for eth and erc20, the length is 1, for erc721 and erc1155, the length could be > 1
MessageType MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
MessageType orm.MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
L1TokenAddress string `json:"l1_token_address"`
L2TokenAddress string `json:"l2_token_address"`
BlockNumber uint64 `json:"block_number"`
TxStatus TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
TxStatus orm.TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
CounterpartChainTx *CounterpartChainTx `json:"counterpart_chain_tx"`
ClaimInfo *ClaimInfo `json:"claim_info"`
BlockTimestamp uint64 `json:"block_timestamp"`
BatchDepositFee string `json:"batch_deposit_fee"` // only for bridge batch deposit
}
// RenderJSON renders response with json

View File

@@ -38,7 +38,7 @@ func GetBlockNumber(ctx context.Context, client *ethclient.Client, confirmations
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return errors.New("event signature mismatch")
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
@@ -66,68 +66,32 @@ func ComputeMessageHash(
return common.BytesToHash(crypto.Keccak256(data))
}
// GetBatchVersionAndBlockRangeFromCalldata finds the block range from calldata, both inclusive.
func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uint64, error) {
const methodIDLength = 4
if len(txData) < methodIDLength {
return 0, 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
}
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
// GetBatchRangeFromCalldata finds the block range from calldata, both inclusive.
func GetBatchRangeFromCalldata(calldata []byte) (uint64, uint64, error) {
method := backendabi.IScrollChainABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
// special case: import genesis batch
method = backendabi.IScrollChainABI.Methods["importGenesisBatch"]
_, err2 := method.Inputs.Unpack(calldata[4:])
if err2 == nil {
// genesis batch
return 0, 0, nil
}
// neither "commitBatch" nor "importGenesisBatch" matches, give up
return 0, 0, err
}
values, err := method.Inputs.Unpack(txData[methodIDLength:])
args := commitBatchArgs{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
}
var chunks [][]byte
var version uint8
if method.Name == "importGenesisBatch" {
return 0, 0, 0, nil
} else if method.Name == "commitBatch" {
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
var args commitBatchArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
version = args.Version
} else if method.Name == "commitBatchWithBlobProof" {
type commitBatchWithBlobProofArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
BlobDataProof []byte
}
var args commitBatchWithBlobProofArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
version = args.Version
} else if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
if len(values) < 3 {
return 0, 0, 0, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
}
var ok bool
version, ok = values[0].(uint8)
if !ok {
return 0, 0, 0, fmt.Errorf("invalid version type: %T", values[0])
}
return version, 0, 0, nil
return 0, 0, err
}
var startBlock uint64
@@ -136,48 +100,19 @@ func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uin
// decode blocks from chunk and assume that there's no empty chunk
// | 1 byte | 60 bytes | ... | 60 bytes |
// | num blocks | block 1 | ... | block n |
if len(chunks) == 0 {
return 0, 0, 0, errors.New("invalid chunks")
if len(args.Chunks) == 0 {
return 0, 0, errors.New("invalid chunks")
}
chunk := chunks[0]
chunk := args.Chunks[0]
block := chunk[1:61] // first block in chunk
startBlock = binary.BigEndian.Uint64(block[0:8])
chunk = chunks[len(chunks)-1]
chunk = args.Chunks[len(args.Chunks)-1]
lastBlockIndex := int(chunk[0]) - 1
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
finishBlock = binary.BigEndian.Uint64(block[0:8])
return version, startBlock, finishBlock, err
}
// GetParentBatchHashFromCalldata gets the parent batch hash from calldata.
// It only supports commitBatches and commitAndFinalizeBatch, which only accept batches >= v7.
func GetParentBatchHashFromCalldata(txData []byte) (common.Hash, error) {
const methodIDLength = 4
if len(txData) < methodIDLength {
return common.Hash{}, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
}
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
}
values, err := method.Inputs.Unpack(txData[methodIDLength:])
if err != nil {
return common.Hash{}, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
}
if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
if len(values) < 3 {
return common.Hash{}, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
}
parentBatchHash, ok := values[1].([32]byte)
if !ok {
return common.Hash{}, fmt.Errorf("invalid parentBatchHash type: %T", values[1])
}
return common.BytesToHash(parentBatchHash[:]), nil
}
return common.Hash{}, fmt.Errorf("method %s does not support parent batch header", method.Name)
return startBlock, finishBlock, err
}
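The chunk layout noted above (a 1-byte block count followed by 60-byte block records whose first 8 bytes hold the big-endian block number) can be decoded in a few lines. This is a sketch that mirrors the slicing done in GetBatchRangeFromCalldata; chunkBlockRangeSketch is an illustrative name, and the encoding/binary and errors imports are assumed.
// Sketch: extract the first and last block numbers from one chunk.
func chunkBlockRangeSketch(chunk []byte) (uint64, uint64, error) {
    if len(chunk) < 1 {
        return 0, 0, errors.New("empty chunk")
    }
    numBlocks := int(chunk[0])
    if numBlocks == 0 || len(chunk) < 1+numBlocks*60 {
        return 0, 0, errors.New("chunk too short for declared block count")
    }
    first := binary.BigEndian.Uint64(chunk[1:9])
    lastOffset := 1 + (numBlocks-1)*60
    last := binary.BigEndian.Uint64(chunk[lastOffset : lastOffset+8])
    return first, last, nil
}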
// GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.

File diff suppressed because one or more lines are too long

View File

@@ -91,7 +91,7 @@ linters-settings:
#local-prefixes: github.com/org/project
gocyclo:
# minimal code complexity to report, 30 by default (but we recommend 10-20)
min-complexity: 40
min-complexity: 30
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true
@@ -104,12 +104,10 @@ linters-settings:
# minimal occurrences count to trigger, 3 by default
min-occurrences: 3
depguard:
rules:
main:
files:
- $all
deny:
- pkg: "github.com/davecgh/go-spew/spew"
list-type: blacklist
include-go-root: false
packages:
- github.com/davecgh/go-spew/spew
misspell:
# Correct spellings using locale preferences for US or UK.
# Default is to use a neutral variety of English.
@@ -254,9 +252,6 @@ issues:
- linters:
- wsl
text: "expressions should not be cuddled with declarations or returns"
- linters:
- govet
text: 'shadow: declaration of "(err|ctx)" shadows declaration at'
# Independently from option `exclude` we use default exclude patterns,
# it can be disabled by this option. To list all

View File

@@ -1,30 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build blob_uploader
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader
# Pull blob_uploader into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/blob_uploader /bin/
WORKDIR /app
ENTRYPOINT ["blob_uploader"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.20-alpine3.16 as base
WORKDIR /src
COPY go.mod* ./
@@ -11,13 +11,11 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/api && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/bridgehistoryapi-api
cd /src/bridge-history-api/cmd/api && go build -v -p 4 -o /bin/bridgehistoryapi-api
# Pull bridgehistoryapi-api into a second stage deploy ubuntu container
FROM ubuntu:20.04
# Pull bridgehistoryapi-api into a second stage deploy alpine container
FROM alpine:latest
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
COPY --from=builder /bin/bridgehistoryapi-api /bin/
WORKDIR /app
ENTRYPOINT ["bridgehistoryapi-api"]
ENTRYPOINT ["bridgehistoryapi-api"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.20-alpine3.16 as base
WORKDIR /src
COPY ./bridge-history-api/go.* ./
@@ -10,11 +10,10 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/db_cli && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli
cd /src/bridge-history-api/cmd/db_cli && go build -v -p 4 -o /bin/db_cli
# Pull db_cli into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-ldl"
# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/db_cli /bin/
WORKDIR /app
ENTRYPOINT ["db_cli"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.20-alpine3.16 as base
WORKDIR /src
COPY go.mod* ./
@@ -11,14 +11,11 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/fetcher && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/bridgehistoryapi-fetcher
cd /src/bridge-history-api/cmd/fetcher && go build -v -p 4 -o /bin/bridgehistoryapi-fetcher
# Pull bridgehistoryapi-fetcher into a second stage deploy ubuntu container
FROM ubuntu:20.04
# Pull bridgehistoryapi-fetcher into a second stage deploy alpine container
FROM alpine:latest
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install ca-certificates vim netcat-openbsd net-tools curl -y
RUN update-ca-certificates
COPY --from=builder /bin/bridgehistoryapi-fetcher /bin/
WORKDIR /app
ENTRYPOINT ["bridgehistoryapi-fetcher"]
ENTRYPOINT ["bridgehistoryapi-fetcher"]

View File

@@ -1,31 +1,30 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
WORKDIR app
FROM chef as planner
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY ./rust-toolchain ./
COPY ./common/libzkp/impl/ .
RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./rust-toolchain ./
COPY ./common/libzkp/impl/rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY .git .git
RUN cargo build --release -p libzkp-c
COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -34,16 +33,15 @@ RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/libzkp/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api && mv ./build/bin/coordinator_api /bin/coordinator_api
RUN mv coordinator/internal/logic/libzkp/lib /bin/
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator_api /bin/

View File

@@ -4,5 +4,3 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-alpine-builder:1.20 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -15,12 +16,10 @@ RUN go mod download -x
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/coordinator/cmd/cron/ && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/coordinator_cron
cd /src/coordinator/cmd/cron/ && go build -v -p 4 -o /bin/coordinator_cron
# Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
# Pull coordinator into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/coordinator_cron /bin/
WORKDIR /app
ENTRYPOINT ["coordinator_cron"]
ENTRYPOINT ["coordinator_cron"]

View File

@@ -4,5 +4,3 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-alpine-builder:1.20 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -16,11 +17,10 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/database/cmd && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli
cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
# Pull db_cli into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-ldl"
# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/db_cli /bin/
WORKDIR /app
ENTRYPOINT ["db_cli"]

View File

@@ -4,5 +4,3 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build event_watcher
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
# Pull event_watcher into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/event_watcher /bin/
WORKDIR /app
ENTRYPOINT ["event_watcher"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-alpine-builder:1.20 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -16,15 +17,10 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle
# Pull gas_oracle into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
# Pull gas_oracle into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/gas_oracle /bin/
WORKDIR /app
ENTRYPOINT ["gas_oracle"]
ENTRYPOINT ["gas_oracle"]

View File

@@ -1,8 +1,5 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/
*target/*

View File

@@ -1,8 +1,8 @@
ifeq ($(GO_VERSION),)
GO_VERSION=1.21
GO_VERSION=1.20
endif
ifeq ($(RUST_VERSION),)
RUST_VERSION=nightly-2023-12-03
RUST_VERSION=nightly-2022-12-10
endif
ifeq ($(PYTHON_VERSION),)
PYTHON_VERSION=3.10

View File

@@ -1,6 +1,6 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.22.12
ARG RUST_VERSION=nightly-2023-12-03
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
@@ -29,14 +29,7 @@ RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN if [ "$(uname -m)" = "x86_64" ]; then \
echo amd64 >/tmp/arch; \
elif [ "$(uname -m)" = "aarch64" ]; then \
echo arm64 >/tmp/arch; \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"

View File

@@ -1,4 +1,4 @@
ARG GO_VERSION=1.21
ARG GO_VERSION=1.20
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.22.12
ARG RUST_VERSION=nightly-2023-12-03
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04
@@ -25,14 +25,7 @@ RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN if [ "$(uname -m)" = "x86_64" ]; then \
echo amd64 >/tmp/arch; \
elif [ "$(uname -m)" = "aarch64" ]; then \
echo arm64 >/tmp/arch; \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"

View File

@@ -1,5 +1,5 @@
ARG ALPINE_VERSION=3.15
ARG RUST_VERSION=nightly-2023-12-03
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41
FROM alpine:${ALPINE_VERSION}

View File

@@ -1,4 +1,4 @@
ARG RUST_VERSION=nightly-2023-12-03
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -0,0 +1,11 @@
# Start from the latest golang base image
FROM golang:1.20
# Install Docker
RUN apt-get update && apt-get install -y docker.io
# Set the working directory
WORKDIR /go/src/app
# This container will be executable
ENTRYPOINT [ "/bin/bash" ]

View File

@@ -1,23 +0,0 @@
FROM ubuntu:24.04 AS builder
RUN apt-get update -y && apt-get upgrade -y
# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y
# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo
COPY . /src
RUN cd /src/zkvm-prover && make prover
FROM ubuntu:24.04 AS runtime
COPY --from=builder /src/target/release/prover /usr/local/bin/
ENTRYPOINT ["prover"]

View File

@@ -1,5 +0,0 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -1,30 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build rollup_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
ENTRYPOINT ["rollup_relayer"]

View File

@@ -1,8 +0,0 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-alpine-builder:1.20 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -16,15 +17,10 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
ENTRYPOINT ["rollup_relayer"]

View File

@@ -1,8 +1,5 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/
*target/*

View File

@@ -15,7 +15,7 @@ import (
const (
// GolangCIVersion to be used for linting.
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2"
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2"
)
// GOBIN environment variable.
@@ -51,7 +51,7 @@ func lint() {
}
cmd = exec.Command(filepath.Join(goBin(), "golangci-lint"))
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml", "--timeout", "10m")
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml")
if *v {
cmd.Args = append(cmd.Args, "-v")

26
build/run_tests.sh Executable file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
set -uex
profile_name=$1
exclude_dirs=("scroll-tech/rollup/cmd" "scroll-tech/rollup/tests" "scroll-tech/rollup/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")
all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"
for pkg in $all_packages; do
exclude_pkg=false
for exclude_dir in "${exclude_dirs[@]}"; do
if [[ $pkg == $exclude_dir* ]]; then
exclude_pkg=true
break
fi
done
if [ "$exclude_pkg" = false ]; then
coverpkg="$coverpkg,$pkg/..."
fi
done
echo "coverage.${profile_name}.txt"
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="$coverpkg" -coverprofile=../coverage.${profile_name}.txt -covermode=atomic ./...

3
common/.gitignore vendored
View File

@@ -1,3 +1,4 @@
/build/bin
.idea
libzkp
libzkp/impl/target
libzkp/interface/*.a

View File

@@ -4,4 +4,5 @@ test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings

View File

@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}
// InitDB init the db handler

View File

@@ -1,27 +1,62 @@
package database_test
package database
import (
"context"
"errors"
"io"
"os"
"testing"
"time"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/scroll-tech/go-ethereum/log"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/testcontainers"
"scroll-tech/common/docker"
"scroll-tech/common/version"
)
func TestGormLogger(t *testing.T) {
output := io.Writer(os.Stderr)
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger := log.NewGlogHandler(ostream)
// Set log level
glogger.Verbosity(log.LvlTrace)
log.Root().SetHandler(glogger)
var gl gormLogger
gl.gethLogger = log.Root()
gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}
func TestDB(t *testing.T) {
version.Version = "v4.1.98-aaa-bbb-ccc"
base := docker.NewDockerApp()
base.RunDBImage(t)
testApps := testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
dbCfg := &Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
db, err := testApps.GetGormDBClient()
var err error
db, err := InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := database.Ping(db)
sqlDB, err := Ping(db)
assert.NoError(t, err)
assert.NotNil(t, sqlDB)
assert.NoError(t, database.CloseDB(db))
assert.NoError(t, CloseDB(db))
}

View File

@@ -1,35 +0,0 @@
package database
import (
"context"
"errors"
"io"
"os"
"testing"
"time"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/scroll-tech/go-ethereum/log"
)
func TestGormLogger(t *testing.T) {
output := io.Writer(os.Stderr)
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger := log.NewGlogHandler(ostream)
// Set log level
glogger.Verbosity(log.LvlTrace)
log.Root().SetHandler(glogger)
var gl gormLogger
gl.gethLogger = log.Root()
gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}

196
common/docker/docker_app.go Normal file
View File

@@ -0,0 +1,196 @@
package docker
import (
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"time"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/common/utils"
)
var (
l1StartPort = 10000
l2StartPort = 20000
dbStartPort = 30000
)
// AppAPI app interface.
type AppAPI interface {
IsRunning() bool
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
}
// App is collection struct of runtime docker images
type App struct {
L1gethImg GethImgInstance
L2gethImg GethImgInstance
DBImg ImgInstance
dbClient *sql.DB
DBConfig *database.DBConfig
DBConfigFile string
// common time stamp.
Timestamp int
}
// NewDockerApp returns new instance of dockerApp struct
func NewDockerApp() *App {
timestamp := time.Now().Nanosecond()
app := &App{
Timestamp: timestamp,
L1gethImg: newTestL1Docker(),
L2gethImg: newTestL2Docker(),
DBImg: newTestDBDocker("postgres"),
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
}
if err := app.mockDBConfig(); err != nil {
panic(err)
}
return app
}
// RunImages runs all images together
func (b *App) RunImages(t *testing.T) {
b.RunDBImage(t)
b.RunL1Geth(t)
b.RunL2Geth(t)
}
// RunDBImage starts postgres docker container.
func (b *App) RunDBImage(t *testing.T) {
if b.DBImg.IsRunning() {
return
}
assert.NoError(t, b.DBImg.Start())
// try up to 10 times until the db is ready.
ok := utils.TryTimes(10, func() bool {
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
return err == nil && db != nil && db.Ping() == nil
})
assert.True(t, ok)
}
// Free clears all running images, double-checks and recycles the docker containers.
func (b *App) Free() {
if b.L1gethImg.IsRunning() {
_ = b.L1gethImg.Stop()
}
if b.L2gethImg.IsRunning() {
_ = b.L2gethImg.Stop()
}
if b.DBImg.IsRunning() {
_ = b.DBImg.Stop()
_ = os.Remove(b.DBConfigFile)
if !utils.IsNil(b.dbClient) {
_ = b.dbClient.Close()
b.dbClient = nil
}
}
}
// RunL1Geth starts l1geth docker container.
func (b *App) RunL1Geth(t *testing.T) {
if b.L1gethImg.IsRunning() {
return
}
assert.NoError(t, b.L1gethImg.Start())
}
// L1Client returns an ethclient by dialing the running l1geth
func (b *App) L1Client() (*ethclient.Client, error) {
if utils.IsNil(b.L1gethImg) {
return nil, fmt.Errorf("l1 geth is not running")
}
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// RunL2Geth starts l2geth docker container.
func (b *App) RunL2Geth(t *testing.T) {
if b.L2gethImg.IsRunning() {
return
}
assert.NoError(t, b.L2gethImg.Start())
}
// L2Client returns an ethclient by dialing the running l2geth
func (b *App) L2Client() (*ethclient.Client, error) {
if utils.IsNil(b.L2gethImg) {
return nil, fmt.Errorf("l2 geth is not running")
}
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// DBClient creates and returns a *sql.DB instance.
func (b *App) DBClient(t *testing.T) *sql.DB {
if !utils.IsNil(b.dbClient) {
return b.dbClient
}
var (
cfg = b.DBConfig
err error
)
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
assert.NoError(t, err)
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
assert.NoError(t, b.dbClient.Ping())
return b.dbClient
}
func (b *App) mockDBConfig() error {
b.DBConfig = &database.DBConfig{
DSN: "",
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
if b.DBImg != nil {
b.DBConfig.DSN = b.DBImg.Endpoint()
}
data, err := json.Marshal(b.DBConfig)
if err != nil {
return err
}
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
}
func newTestL1Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
}
func newTestL2Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
}
func newTestDBDocker(driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
}

132
common/docker/docker_db.go Normal file
View File

@@ -0,0 +1,132 @@
package docker
import (
"context"
"fmt"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgDB the postgres image manager.
type ImgDB struct {
image string
name string
id string
dbName string
port int
password string
running bool
cmd *cmd.Cmd
}
// NewImgDB returns a postgres db img instance.
func NewImgDB(image, password, dbName string, port int) ImgInstance {
img := &ImgDB{
image: image,
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
password: password,
dbName: dbName,
port: port,
}
img.cmd = cmd.NewCmd("docker", img.prepare()...)
return img
}
// Start postgres db container.
func (i *ImgDB) Start() error {
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.running = i.isOk()
if !i.running {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
return nil
}
// Stop the container.
func (i *ImgDB) Stop() error {
if !i.running {
return nil
}
i.running = false
ctx := context.Background()
// stop the running container.
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
return err
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
}
// Endpoint returns the DSN.
func (i *ImgDB) Endpoint() string {
return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
}
// IsRunning returns docker container's running status.
func (i *ImgDB) IsRunning() bool {
return i.running
}
func (i *ImgDB) prepare() []string {
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
}
cmd = append(cmd, envs...)
return append(cmd, i.image)
}
func (i *ImgDB) isOk() bool {
keyword := "database system is ready to accept connections"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 20):
return false
}
return i.id != ""
}
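
For orientation, the ImgDB manager above is normally driven through the ImgInstance interface, as docker_app.go does via newTestDBDocker. Below is a minimal, hedged sketch of a direct test using only the constructor and interface methods shown in this diff; the test name and port value are illustrative assumptions, not part of the change.

package docker_test

import (
	"testing"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" //nolint:golint
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"
)

// TestPostgresImg is illustrative only: it starts a postgres container via
// the ImgDB manager, pings it through the DSN the manager reports, then
// stops and removes the container again.
func TestPostgresImg(t *testing.T) {
	// Port 30001 is an arbitrary example; credentials mirror newTestDBDocker.
	img := docker.NewImgDB("postgres", "123456", "test_db", 30001)
	assert.NoError(t, img.Start())
	defer func() { assert.NoError(t, img.Stop()) }()

	db, err := sqlx.Open("postgres", img.Endpoint())
	assert.NoError(t, err)
	assert.NoError(t, db.Ping())
}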

View File

@@ -0,0 +1,175 @@
package docker
import (
"context"
"fmt"
"math/big"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgGeth is the geth image manager; it includes l1geth and l2geth.
type ImgGeth struct {
image string
name string
id string
volume string
ipcPath string
httpPort int
wsPort int
chainID *big.Int
running bool
cmd *cmd.Cmd
}
// NewImgGeth returns a geth img instance.
func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
img := &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
volume: volume,
ipcPath: ipc,
httpPort: hPort,
wsPort: wPort,
}
img.cmd = cmd.NewCmd("docker", img.params()...)
return img
}
// Start runs the image and checks whether it is running healthily.
func (i *ImgGeth) Start() error {
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.running = i.isOk()
if !i.running {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
// try up to 10 times to get the chainID until it succeeds.
utils.TryTimes(10, func() bool {
client, err := ethclient.Dial(i.Endpoint())
if err == nil && client != nil {
i.chainID, err = client.ChainID(context.Background())
return err == nil && i.chainID != nil
}
return false
})
return nil
}
// IsRunning returns docker container's running status.
func (i *ImgGeth) IsRunning() bool {
return i.running
}
// Endpoint returns the connection endpoint.
func (i *ImgGeth) Endpoint() string {
switch true {
case i.httpPort != 0:
return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
case i.wsPort != 0:
return fmt.Sprintf("ws://127.0.0.1:%d", i.wsPort)
default:
return i.ipcPath
}
}
// ChainID returns the chainID.
func (i *ImgGeth) ChainID() *big.Int {
return i.chainID
}
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 10):
return false
}
return i.id != ""
}
// Stop the docker container.
func (i *ImgGeth) Stop() error {
if !i.running {
return nil
}
i.running = false
ctx := context.Background()
// check if container is running, stop the running container.
id := GetContainerID(i.name)
if id != "" {
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
return err
}
i.id = id
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
}
func (i *ImgGeth) params() []string {
cmds := []string{"run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
}
if i.wsPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.wsPort) + ":8546"}...)
}
var envs []string
if i.ipcPath != "" {
envs = append(envs, []string{"-e", fmt.Sprintf("IPC_PATH=%s", i.ipcPath)}...)
}
if i.volume != "" {
cmds = append(cmds, []string{"-v", fmt.Sprintf("%s:%s", i.volume, i.volume)}...)
}
cmds = append(cmds, ports...)
cmds = append(cmds, envs...)
return append(cmds, i.image)
}
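
Similarly, the ImgGeth manager above can be exercised directly rather than through the App helpers. A minimal sketch follows, under the assumption that GethImgInstance exposes ChainID() alongside Start/Stop/Endpoint as the methods above suggest; the test name, image name usage, and HTTP port are illustrative.

package docker_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"
)

// TestL2GethImg is illustrative only: it starts an l2geth container exposing
// HTTP, reads back the chain ID that Start() cached, then stops the container.
func TestL2GethImg(t *testing.T) {
	// Port 20001 is an arbitrary example, mirroring the l2StartPort range.
	img := docker.NewImgGeth("scroll_l2geth", "", "", 20001, 0)
	assert.NoError(t, img.Start())
	defer func() { assert.NoError(t, img.Stop()) }()

	if id := img.ChainID(); assert.NotNil(t, id) {
		t.Logf("endpoint: %s, chainID: %s", img.Endpoint(), id.String())
	}
}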

View File

@@ -0,0 +1,54 @@
package docker_test
import (
"context"
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
)
var (
base *docker.App
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestDB(t *testing.T) {
base.RunDBImage(t)
db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}
func TestL1Geth(t *testing.T) {
base.RunL1Geth(t)
client, err := base.L1Client()
assert.NoError(t, err)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}
func TestL2Geth(t *testing.T) {
base.RunL2Geth(t)
client, err := base.L2Client()
assert.NoError(t, err)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}

View File

@@ -4,7 +4,7 @@ import (
"context"
"math/big"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
)
@@ -40,7 +40,7 @@ type GethImgInstance interface {
func GetContainerID(name string) string {
filter := filters.NewArgs()
filter.Add("name", name)
lst, _ := cli.ContainerList(context.Background(), container.ListOptions{
lst, _ := cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filter,
})
if len(lst) > 0 {

View File

@@ -1,4 +1,4 @@
FROM ethereum/client-go:v1.14.0
FROM ethereum/client-go
COPY password /l1geth/
COPY genesis.json /l1geth/

Some files were not shown because too many files have changed in this diff.