Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-11 23:18:07 -05:00

Compare commits: codecv1-sc ... v4.4.6 (40 commits)
Commits in this comparison:

f2fd8963a9, e64bb7725f, c8cfa9c15d, 612e66ebc2, 1c7c30c9ae, 320ab56d1d, 54415f6a78, 87c9c33bcc,
fdcd43a296, a1a7f25921, 8be70f0c80, d1bec53e50, 0723b463c5, 46b1ff3284, a3635dba52, 34ad8ca772,
8a2a2eb292, 8ca89374a0, 8128526116, 9262e9af69, 5090b77655, 4cafc9349a, ca6f856372, 72ee087f35,
8f8f6eb1a1, c56bda9f47, 433d5c2f52, 69c0f7ed75, c25b827666, da4f6818e3, 200ca7c15b, 53cf26597d,
f0f7341271, b6af88c936, de541a650a, d7a57235d3, 91d21301ec, 4b32a44a70, 55b400c5fb, 1b49091207
.github/workflows/bridge_history_api.yml (vendored): 6 changed lines

@@ -34,7 +34,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
@@ -48,7 +48,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Test
run: |
make test
@@ -67,7 +67,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge-history-api/ -w .

.github/workflows/bump_version.yml (vendored): 2 changed lines

@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: ${{ github.head_ref }}
- name: check diff

.github/workflows/common.yml (vendored): 8 changed lines
@@ -29,7 +29,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly-2022-12-10
|
||||
toolchain: nightly-2023-12-03
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
- name: Install Go
|
||||
@@ -37,7 +37,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Cache cargo
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -56,7 +56,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports
|
||||
- name: Run goimports lint
|
||||
@@ -81,7 +81,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
|
||||
.github/workflows/contracts.yml (vendored): 4 changed lines
@@ -31,7 +31,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
@@ -98,7 +98,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
|
||||
.github/workflows/coordinator.yml (vendored): 16 changed lines
@@ -33,15 +33,15 @@ jobs:
|
||||
steps:
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly-2022-12-10
|
||||
toolchain: nightly-2023-12-03
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Lint
|
||||
working-directory: 'coordinator'
|
||||
run: |
|
||||
@@ -54,9 +54,9 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports
|
||||
- name: Run goimports lint
|
||||
@@ -77,7 +77,7 @@ jobs:
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - name: Checkout code
|
||||
# uses: actions/checkout@v2
|
||||
# uses: actions/checkout@v4
|
||||
# - name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v2
|
||||
# - name: Build and push
|
||||
@@ -95,9 +95,9 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
|
||||
.github/workflows/database.yml (vendored): 6 changed lines
@@ -34,7 +34,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Lint
|
||||
working-directory: 'database'
|
||||
run: |
|
||||
@@ -49,7 +49,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports
|
||||
- name: Run goimports lint
|
||||
@@ -74,7 +74,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
|
||||
.github/workflows/docker.yml (vendored): 66 changed lines
@@ -13,7 +13,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
@@ -46,6 +46,7 @@ jobs:
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/event_watcher.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
@@ -57,7 +58,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
@@ -90,6 +91,7 @@ jobs:
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/gas_oracle.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
@@ -101,7 +103,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
@@ -134,6 +136,52 @@ jobs:
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/rollup_relayer.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
|
||||
|
||||
rollup-db-cli:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
- name: check repo and create it if not exist
|
||||
env:
|
||||
REPOSITORY: rollup-db-cli
|
||||
run: |
|
||||
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
REPOSITORY: rollup-db-cli
|
||||
IMAGE_TAG: ${{ github.ref_name }}
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/db_cli.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
@@ -145,7 +193,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
@@ -178,6 +226,7 @@ jobs:
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
@@ -189,7 +238,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
@@ -222,6 +271,7 @@ jobs:
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
@@ -233,7 +283,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
@@ -266,6 +316,7 @@ jobs:
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/coordinator-api.Dockerfile
|
||||
platforms: linux/amd64
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
@@ -277,7 +328,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
@@ -310,6 +361,7 @@ jobs:
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/coordinator-cron.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
|
||||
.github/workflows/integration.yml (vendored): 2 changed lines

@@ -24,7 +24,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

.github/workflows/intermediate-docker.yml (vendored): 216 changed lines
@@ -7,12 +7,12 @@ on:
|
||||
description: 'Go version'
|
||||
required: true
|
||||
type: string
|
||||
default: '1.20'
|
||||
default: '1.21'
|
||||
RUST_VERSION:
|
||||
description: 'Rust toolchain version'
|
||||
required: true
|
||||
type: string
|
||||
default: 'nightly-2022-12-10'
|
||||
default: 'nightly-2023-12-03'
|
||||
PYTHON_VERSION:
|
||||
description: 'Python version'
|
||||
required: false
|
||||
@@ -29,31 +29,191 @@ defaults:
|
||||
working-directory: 'build/dockerfiles/intermediate'
|
||||
|
||||
jobs:
|
||||
build-and-push:
|
||||
build-and-publish-cuda-go-rust-builder:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build
|
||||
run: |
|
||||
make all
|
||||
env:
|
||||
GO_VERSION: ${{ inputs.GO_VERSION }}
|
||||
RUST_VERSION: ${{ inputs.RUST_VERSION }}
|
||||
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
|
||||
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
|
||||
- name: Publish
|
||||
run: |
|
||||
make publish
|
||||
env:
|
||||
GO_VERSION: ${{ inputs.GO_VERSION }}
|
||||
RUST_VERSION: ${{ inputs.RUST_VERSION }}
|
||||
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
|
||||
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build image
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
file: build/dockerfiles/intermediate/cuda-go-rust-builder.Dockerfile
|
||||
tags: scrolltech/cuda-go-rust-builder:cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
|
||||
build-args: |
|
||||
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
|
||||
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
|
||||
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
|
||||
|
||||
build-and-publish-go-rust-builder:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build image
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
file: build/dockerfiles/intermediate/go-rust-builder.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: scrolltech/go-rust-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
|
||||
build-args: |
|
||||
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
|
||||
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
|
||||
|
||||
build-and-publish-go-alpine-builder:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build image
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
file: build/dockerfiles/intermediate/go-alpine-builder.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: scrolltech/go-alpine-builder:${{ github.event.inputs.GO_VERSION }}
|
||||
build-args: |
|
||||
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
|
||||
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
|
||||
|
||||
build-and-publish-rust-builder:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build image
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
file: build/dockerfiles/intermediate/rust-builder.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: scrolltech/rust-builder:${{ github.event.inputs.RUST_VERSION }}
|
||||
build-args: |
|
||||
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
|
||||
|
||||
build-and-publish-rust-alpine-builder:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build image
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
file: build/dockerfiles/intermediate/rust-alpine-builder.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: scrolltech/rust-alpine-builder:${{ github.event.inputs.RUST_VERSION }}
|
||||
build-args: |
|
||||
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
|
||||
|
||||
build-and-publish-go-rust-alpine-builder:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build image
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
file: build/dockerfiles/intermediate/go-rust-alpine-builder.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: scrolltech/go-rust-alpine-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
|
||||
build-args: |
|
||||
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
|
||||
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
|
||||
|
||||
build-and-publish-py-runner:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build image
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
file: build/dockerfiles/intermediate/py-runner.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: scrolltech/py-runner:${{ github.event.inputs.PYTHON_VERSION }}
|
||||
build-args: |
|
||||
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
|
||||
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
|
||||
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
|
||||
|
||||
|
||||
.github/workflows/prover.yml (vendored): 10 changed lines
@@ -34,7 +34,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Test
|
||||
run: |
|
||||
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly-2022-12-10
|
||||
toolchain: nightly-2023-12-03
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
- name: Install Go
|
||||
@@ -58,7 +58,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Cache cargo
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -75,7 +75,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Lint
|
||||
run: |
|
||||
rm -rf $HOME/.cache/golangci-lint
|
||||
@@ -89,7 +89,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports
|
||||
- run: goimports -local scroll-tech/prover/ -w .
|
||||
|
||||
.github/workflows/rollup.yml (vendored): 8 changed lines
@@ -36,7 +36,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
@@ -60,7 +60,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports
|
||||
- name: Run goimports lint
|
||||
@@ -85,7 +85,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
@@ -117,7 +117,7 @@ jobs:
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - name: Checkout code
|
||||
# uses: actions/checkout@v2
|
||||
# uses: actions/checkout@v4
|
||||
# - name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v2
|
||||
# - run: make docker
|
||||
|
||||
Makefile: 12 changed lines

@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update

L2GETH_TAG=scroll-v5.1.6
L2GETH_TAG=scroll-v5.3.0

help: ## Display this help message
	@grep -h \
@@ -44,14 +44,8 @@ fmt: ## format the code

dev_docker: ## build docker images for development/testing usages
	docker pull postgres
	docker build -t scroll_l1geth ./common/docker/l1geth/
	docker build -t scroll_l2geth ./common/docker/l2geth/

build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
	docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)

run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
	docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app -e HOST_PATH=$(PWD) scroll_test_image
	docker build -t scroll_l1geth ./common/testcontainers/docker/l1geth/
	docker build -t scroll_l2geth ./common/testcontainers/docker/l2geth/

clean: ## Empty out the bin folder
	@rm -rf build/bin

README.md: 35 changed lines

@@ -43,8 +43,6 @@ make dev_docker

## Testing Rollup & Coordinator

### For Non-Apple Silicon (M1/M2) Macs

Run the tests using the following commands:

```bash
@@ -54,39 +52,6 @@ go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```

### For Apple Silicon (M1/M2) Macs

To run tests on Apple Silicon Macs, build and execute the Docker image as outlined below:

#### Build a Docker Image for Testing

Use the following command to build a Docker image:

```bash
make build_test_docker
```

This command builds a Docker image named `scroll_test_image` using the Dockerfile found at `./build/dockerfiles/local_test.Dockerfile`.

#### Run Docker Image

After the image is built, run a Docker container from it:

```bash
make run_test_docker
```

This command runs a Docker container named `scroll_test_container` from the `scroll_test_image` image. The container uses the host network and has access to the Docker socket and the current directory.

Once the Docker container is running, execute the tests using the following commands:

```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```

## Testing Contracts

You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).

@@ -79,3 +79,50 @@ provides REST APIs. Please refer to the API details below.
// @Success 200
// @Router /api/txsbyhashes [post]
```

## Running bridge-history-api locally

1. Pull the latest Redis image:
```
docker pull redis:latest
```

2. Run the Redis container:
```
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
```

3. Pull the latest PostgreSQL image:
```
docker pull postgres:latest
```

4. Run the PostgreSQL container:
```
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
```

5. Run database migrations to initialize the tables:
```
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli migrate
```

6. Run bridgehistoryapi-fetcher:
```
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```

7. Run bridgehistoryapi-api:
```
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```

The endpoints provided in [./conf/config.json](./conf/config.json) are all public endpoints and have rate limits.

For production usage:

- For L1 endpoints, utilizing a service provider's free tier should suffice.
- For L2 endpoints, consider [running a Scroll L2geth node](https://docs.scroll.xyz/en/developers/guides/running-a-scroll-node) and using the exposed HTTP port.
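
Once both services are up, a quick smoke test is to post a transaction-hash query against the `/api/txsbyhashes` route documented above. A minimal sketch in Go; the listen port (`localhost:8080`), the lowercase `txs` body field, and the placeholder hash are assumptions for illustration, not taken from this repository:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Hypothetical request body; the field name "txs" is an assumption.
	reqBody, err := json.Marshal(map[string][]string{
		"txs": {"0x0000000000000000000000000000000000000000000000000000000000000000"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// /api/txsbyhashes is the POST route from the swagger annotations above;
	// the port is assumed.
	resp, err := http.Post("http://localhost:8080/api/txsbyhashes", "application/json", bytes.NewReader(reqBody))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status, string(body))
}
```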
File diff suppressed because one or more lines are too long
@@ -26,32 +26,27 @@ func init() {
		Name: "reset",
		Usage: "Clean and reset database.",
		Action: resetDB,
		Flags: []cli.Flag{&utils.ConfigFileFlag},
	},
	{
		Name: "status",
		Usage: "Check migration status.",
		Action: checkDBStatus,
		Flags: []cli.Flag{&utils.ConfigFileFlag},
	},
	{
		Name: "version",
		Usage: "Display the current database version.",
		Action: dbVersion,
		Flags: []cli.Flag{&utils.ConfigFileFlag},
	},
	{
		Name: "migrate",
		Usage: "Migrate the database to the latest version.",
		Action: migrateDB,
		Flags: []cli.Flag{&utils.ConfigFileFlag},
	},
	{
		Name: "rollback",
		Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
		Action: rollbackDB,
		Flags: []cli.Flag{
			&utils.ConfigFileFlag,
			&cli.IntFlag{
				Name: "version",
				Usage: "Rollback to the specified version.",

@@ -15,9 +15,11 @@
    "USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
    "LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
    "DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
    "PufferGatewayAddr": "0xA033Ff09f2da45f0e9ae495f525363722Df42b2a",
    "ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
    "GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
    "MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
    "MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
    "BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
  },
  "L2": {
    "confirmation": 0,
@@ -34,7 +36,10 @@
    "USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
    "LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
    "DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
    "GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79"
    "PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
    "GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
    "MessageQueueAddr": "0x5300000000000000000000000000000000000000",
    "BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
  },
  "db": {
    "dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

@@ -29,6 +29,7 @@ type FetcherConfig struct {
	ScrollChainAddr string `json:"ScrollChainAddr"`
	GatewayRouterAddr string `json:"GatewayRouterAddr"`
	MessageQueueAddr string `json:"MessageQueueAddr"`
	BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
}
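
The new `BatchBridgeGatewayAddr` key added to conf/config.json above maps onto this struct field through its JSON tag. A minimal decoding sketch using a trimmed-down copy of the struct (only the fields visible in this hunk; the real FetcherConfig has more):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// fetcherConfig is a trimmed illustration of FetcherConfig, limited to the
// address fields shown in this hunk; it is not the repository's full struct.
type fetcherConfig struct {
	ScrollChainAddr        string `json:"ScrollChainAddr"`
	GatewayRouterAddr      string `json:"GatewayRouterAddr"`
	MessageQueueAddr       string `json:"MessageQueueAddr"`
	BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
}

func main() {
	raw := []byte(`{
		"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
		"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
	}`)

	var cfg fetcherConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.BatchBridgeGatewayAddr)
}
```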

// RedisConfig redis config
@@ -8,8 +8,17 @@ import (
)

var (
	// HistoryCtrler is controller instance
	HistoryCtrler *HistoryController
	// TxsByAddressCtl the TxsByAddressController instance
	TxsByAddressCtl *TxsByAddressController

	// TxsByHashesCtl the TxsByHashesController instance
	TxsByHashesCtl *TxsByHashesController

	// L2UnclaimedWithdrawalsByAddressCtl the L2UnclaimedWithdrawalsByAddressController instance
	L2UnclaimedWithdrawalsByAddressCtl *L2UnclaimedWithdrawalsByAddressController

	// L2WithdrawalsByAddressCtl the L2WithdrawalsByAddressController instance
	L2WithdrawalsByAddressCtl *L2WithdrawalsByAddressController

	initControllerOnce sync.Once
)
@@ -17,6 +26,9 @@ var (
// InitController inits Controller with database
func InitController(db *gorm.DB, redis *redis.Client) {
	initControllerOnce.Do(func() {
		HistoryCtrler = NewHistoryController(db, redis)
		TxsByAddressCtl = NewTxsByAddressController(db, redis)
		TxsByHashesCtl = NewTxsByHashesController(db, redis)
		L2UnclaimedWithdrawalsByAddressCtl = NewL2UnclaimedWithdrawalsByAddressController(db, redis)
		L2WithdrawalsByAddressCtl = NewL2WithdrawalsByAddressController(db, redis)
	})
}
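
This change splits the former monolithic HistoryController into per-endpoint controllers, all wired up exactly once via sync.Once. A sketch of how a router might consume them after calling InitController; only the /api/txsbyhashes path appears in this diff, so the remaining paths and the package/route setup are illustrative assumptions:

```go
package route

import (
	"github.com/gin-gonic/gin"
	"github.com/go-redis/redis/v8"
	"gorm.io/gorm"

	"scroll-tech/bridge-history-api/internal/controller/api"
)

// RegisterRoutes is hypothetical wiring, not code from this diff; only the
// /api/txsbyhashes path is documented here, the other paths are assumptions.
func RegisterRoutes(router *gin.Engine, db *gorm.DB, redisClient *redis.Client) {
	api.InitController(db, redisClient) // safe to call repeatedly: guarded by sync.Once

	r := router.Group("/api")
	r.POST("/txsbyhashes", api.TxsByHashesCtl.PostQueryTxsByHashes)
	r.GET("/txs", api.TxsByAddressCtl.GetTxsByAddress)
	r.GET("/l2/withdrawals", api.L2WithdrawalsByAddressCtl.GetL2WithdrawalsByAddress)
	r.GET("/l2/unclaimed/withdrawals", api.L2UnclaimedWithdrawalsByAddressCtl.GetL2UnclaimedWithdrawalsByAddress)
}
```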
@@ -1,94 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/logic"
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// HistoryController contains the query claimable txs service
|
||||
type HistoryController struct {
|
||||
historyLogic *logic.HistoryLogic
|
||||
}
|
||||
|
||||
// NewHistoryController return HistoryController instance
|
||||
func NewHistoryController(db *gorm.DB, redis *redis.Client) *HistoryController {
|
||||
return &HistoryController{
|
||||
historyLogic: logic.NewHistoryLogic(db, redis),
|
||||
}
|
||||
}
|
||||
|
||||
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
|
||||
func (c *HistoryController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
|
||||
// GetL2WithdrawalsByAddress defines the http get method behavior
|
||||
func (c *HistoryController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
|
||||
// GetTxsByAddress defines the http get method behavior
|
||||
func (c *HistoryController) GetTxsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetTxsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
|
||||
// PostQueryTxsByHashes defines the http post method behavior
|
||||
func (c *HistoryController) PostQueryTxsByHashes(ctx *gin.Context) {
|
||||
var req types.QueryByHashRequest
|
||||
if err := ctx.ShouldBindJSON(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/logic"
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// L2UnclaimedWithdrawalsByAddressController the controller of GetL2UnclaimedWithdrawalsByAddress
|
||||
type L2UnclaimedWithdrawalsByAddressController struct {
|
||||
historyLogic *logic.HistoryLogic
|
||||
}
|
||||
|
||||
// NewL2UnclaimedWithdrawalsByAddressController create new L2UnclaimedWithdrawalsByAddressController
|
||||
func NewL2UnclaimedWithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2UnclaimedWithdrawalsByAddressController {
|
||||
return &L2UnclaimedWithdrawalsByAddressController{
|
||||
historyLogic: logic.NewHistoryLogic(db, redisClient),
|
||||
}
|
||||
}
|
||||
|
||||
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
|
||||
func (c *L2UnclaimedWithdrawalsByAddressController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/logic"
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// L2WithdrawalsByAddressController the controller of GetL2WithdrawalsByAddress
|
||||
type L2WithdrawalsByAddressController struct {
|
||||
historyLogic *logic.HistoryLogic
|
||||
}
|
||||
|
||||
// NewL2WithdrawalsByAddressController create new L2WithdrawalsByAddressController
|
||||
func NewL2WithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2WithdrawalsByAddressController {
|
||||
return &L2WithdrawalsByAddressController{
|
||||
historyLogic: logic.NewHistoryLogic(db, redisClient),
|
||||
}
|
||||
}
|
||||
|
||||
// GetL2WithdrawalsByAddress defines the http get method behavior
|
||||
func (c *L2WithdrawalsByAddressController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
bridge-history-api/internal/controller/api/txs_by_address.go (new file): 40 lines
@@ -0,0 +1,40 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/logic"
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// TxsByAddressController the controller of GetTxsByAddress
|
||||
type TxsByAddressController struct {
|
||||
historyLogic *logic.HistoryLogic
|
||||
}
|
||||
|
||||
// NewTxsByAddressController create new TxsByAddressController
|
||||
func NewTxsByAddressController(db *gorm.DB, redisClient *redis.Client) *TxsByAddressController {
|
||||
return &TxsByAddressController{
|
||||
historyLogic: logic.NewHistoryLogic(db, redisClient),
|
||||
}
|
||||
}
|
||||
|
||||
// GetTxsByAddress defines the http get method behavior
|
||||
func (c *TxsByAddressController) GetTxsByAddress(ctx *gin.Context) {
|
||||
var req types.QueryByAddressRequest
|
||||
if err := ctx.ShouldBind(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetTxsError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: pagedTxs, Total: total}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
bridge-history-api/internal/controller/api/txs_by_hashes.go (new file): 40 lines
@@ -0,0 +1,40 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/logic"
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// TxsByHashesController the controller of PostQueryTxsByHashes
|
||||
type TxsByHashesController struct {
|
||||
historyLogic *logic.HistoryLogic
|
||||
}
|
||||
|
||||
// NewTxsByHashesController create a new TxsByHashesController
|
||||
func NewTxsByHashesController(db *gorm.DB, redisClient *redis.Client) *TxsByHashesController {
|
||||
return &TxsByHashesController{
|
||||
historyLogic: logic.NewHistoryLogic(db, redisClient),
|
||||
}
|
||||
}
|
||||
|
||||
// PostQueryTxsByHashes query the txs by hashes
|
||||
func (c *TxsByHashesController) PostQueryTxsByHashes(ctx *gin.Context) {
|
||||
var req types.QueryByHashRequest
|
||||
if err := ctx.ShouldBindJSON(&req); err != nil {
|
||||
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
|
||||
return
|
||||
}
|
||||
|
||||
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
|
||||
return
|
||||
}
|
||||
|
||||
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
|
||||
types.RenderSuccess(ctx, resultData)
|
||||
}
|
||||
@@ -63,7 +63,7 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor

// Start starts the L1 message fetching process.
func (c *L1MessageFetcher) Start() {
	messageSyncedHeight, batchSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
	messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
	if dbErr != nil {
		log.Crit("L1MessageFetcher start failed", "err", dbErr)
	}
@@ -72,6 +72,11 @@ func (c *L1MessageFetcher) Start() {
	if batchSyncedHeight > l1SyncHeight {
		l1SyncHeight = batchSyncedHeight
	}

	if bridgeBatchDepositSyncedHeight > l1SyncHeight {
		l1SyncHeight = bridgeBatchDepositSyncedHeight
	}

	if c.cfg.StartHeight > l1SyncHeight {
		l1SyncHeight = c.cfg.StartHeight - 1
	}
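
The hunk above resumes fetching from the furthest height any of the trackers has already synced, optionally overridden by the configured start height. The same selection can be written as a small helper; this is a sketch, not code from this repository:

```go
// resumeHeight picks the block height the fetcher resumes from: the maximum
// of all per-table synced heights, unless the configured start height is
// further ahead (minus one, so the configured start block is still fetched).
func resumeHeight(configStartHeight uint64, syncedHeights ...uint64) uint64 {
	var resume uint64
	for _, h := range syncedHeights {
		if h > resume {
			resume = h
		}
	}
	if configStartHeight > resume {
		resume = configStartHeight - 1
	}
	return resume
}

// Mirroring L1MessageFetcher.Start (illustrative call):
// l1SyncHeight := resumeHeight(c.cfg.StartHeight,
//     messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight)
```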
@@ -91,7 +96,13 @@ func (c *L1MessageFetcher) Start() {

	c.updateL1SyncHeight(l1SyncHeight, header.Hash())

	log.Info("Start L1 message fetcher", "message synced height", messageSyncedHeight, "batch synced height", batchSyncedHeight, "config start height", c.cfg.StartHeight, "sync start height", c.l1SyncHeight+1)
	log.Info("Start L1 message fetcher",
		"message synced height", messageSyncedHeight,
		"batch synced height", batchSyncedHeight,
		"bridge batch deposit height", bridgeBatchDepositSyncedHeight,
		"config start height", c.cfg.StartHeight,
		"sync start height", c.l1SyncHeight+1,
	)

	tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
	go func() {

@@ -64,13 +64,17 @@ func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
|
||||
|
||||
// Start starts the L2 message fetching process.
|
||||
func (c *L2MessageFetcher) Start() {
|
||||
l2SentMessageSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
|
||||
l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
|
||||
if dbErr != nil {
|
||||
log.Crit("failed to get L2 cross message processed height", "err", dbErr)
|
||||
return
|
||||
}
|
||||
|
||||
l2SyncHeight := l2SentMessageSyncedHeight
|
||||
if l2BridgeBatchDepositSyncedHeight > l2SyncHeight {
|
||||
l2SyncHeight = l2BridgeBatchDepositSyncedHeight
|
||||
}
|
||||
|
||||
// Sync from an older block to prevent reorg during restart.
|
||||
if l2SyncHeight < logic.L2ReorgSafeDepth {
|
||||
l2SyncHeight = 0
|
||||
@@ -86,7 +90,8 @@ func (c *L2MessageFetcher) Start() {
|
||||
|
||||
c.updateL2SyncHeight(l2SyncHeight, header.Hash())
|
||||
|
||||
log.Info("Start L2 message fetcher", "message synced height", l2SentMessageSyncedHeight, "sync start height", l2SyncHeight+1)
|
||||
log.Info("Start L2 message fetcher", "l2 sent message synced height", l2SentMessageSyncedHeight,
|
||||
"bridge batch deposit synced height", l2BridgeBatchDepositSyncedHeight, "sync start height", l2SyncHeight+1)
|
||||
|
||||
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
|
||||
go func() {
|
||||
@@ -141,6 +146,11 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
return
|
||||
}
|
||||
|
||||
if updateErr := c.eventUpdateLogic.UpdateL2BridgeBatchDepositEvent(c.ctx, l2FetcherResult.BridgeBatchDepositMessage); updateErr != nil {
|
||||
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
|
||||
return
|
||||
}
|
||||
|
||||
c.updateL2SyncHeight(to, lastBlockHash)
|
||||
c.l2MessageFetcherRunningTotal.Inc()
|
||||
}
|
||||
|
||||
@@ -11,14 +11,16 @@ import (
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
btypes "scroll-tech/bridge-history-api/internal/types"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// EventUpdateLogic the logic of insert/update the database
|
||||
type EventUpdateLogic struct {
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
batchEventOrm *orm.BatchEvent
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
batchEventOrm *orm.BatchEvent
|
||||
bridgeBatchDepositEventOrm *orm.BridgeBatchDepositEvent
|
||||
|
||||
eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight prometheus.Gauge
|
||||
eventUpdateLogicL2MessageNonceUpdateHeight prometheus.Gauge
|
||||
@@ -27,9 +29,10 @@ type EventUpdateLogic struct {
|
||||
// NewEventUpdateLogic creates a EventUpdateLogic instance
|
||||
func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
|
||||
b := &EventUpdateLogic{
|
||||
db: db,
|
||||
crossMessageOrm: orm.NewCrossMessage(db),
|
||||
batchEventOrm: orm.NewBatchEvent(db),
|
||||
db: db,
|
||||
crossMessageOrm: orm.NewCrossMessage(db),
|
||||
batchEventOrm: orm.NewBatchEvent(db),
|
||||
bridgeBatchDepositEventOrm: orm.NewBridgeBatchDepositEvent(db),
|
||||
}
|
||||
|
||||
if !isL1 {
|
||||
@@ -48,30 +51,42 @@ func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
|
||||
}
|
||||
|
||||
// GetL1SyncHeight gets the l1 sync height from db
|
||||
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, error) {
|
||||
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL1SentMessage)
|
||||
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, uint64, error) {
|
||||
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL1SentMessage)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 cross message synced height", "error", err)
|
||||
return 0, 0, err
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
batchSyncedHeight, err := b.batchEventOrm.GetBatchEventSyncedHeightInDB(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 batch event synced height", "error", err)
|
||||
return 0, 0, err
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
return messageSyncedHeight, batchSyncedHeight, nil
|
||||
bridgeBatchDepositSyncedHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL1SyncedHeightInDB(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get l1 bridge batch deposit synced height", "error", err)
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
return messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, nil
|
||||
}
|
||||
|
||||
// GetL2MessageSyncedHeightInDB gets L2 messages synced height
|
||||
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, error) {
|
||||
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL2SentMessage)
|
||||
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, uint64, error) {
|
||||
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL2SentMessage)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 cross message processed height", "err", err)
|
||||
return 0, err
|
||||
return 0, 0, err
|
||||
}
|
||||
return l2SentMessageSyncedHeight, nil
|
||||
|
||||
l2BridgeBatchDepositSyncHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL2SyncedHeightInDB(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get bridge batch deposit processed height", "err", err)
|
||||
return 0, 0, err
|
||||
}
|
||||
return l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncHeight, nil
|
||||
}
|
||||
|
||||
// L1InsertOrUpdate inserts or updates l1 messages
|
||||
@@ -100,6 +115,12 @@ func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult
|
||||
log.Error("failed to insert failed L1 gateway transactions", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.bridgeBatchDepositEventOrm.InsertOrUpdateL1BridgeBatchDepositEvent(ctx, l1FetcherResult.BridgeBatchDepositEvents); err != nil {
|
||||
log.Error("failed to insert L1 bridge batch deposit transactions", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -139,7 +160,7 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
|
||||
|
||||
for i, message := range l2WithdrawMessages {
|
||||
message.MerkleProof = proofs[i]
|
||||
message.RollupStatus = int(orm.RollupStatusTypeFinalized)
|
||||
message.RollupStatus = int(btypes.RollupStatusTypeFinalized)
|
||||
message.BatchIndex = batchIndex
|
||||
}
|
||||
|
||||
@@ -175,6 +196,30 @@ func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, heig
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateL2BridgeBatchDepositEvent update l2 bridge batch deposit status
|
||||
func (b *EventUpdateLogic) UpdateL2BridgeBatchDepositEvent(ctx context.Context, l2BatchDistributes []*orm.BridgeBatchDepositEvent) error {
|
||||
distributeFailedMap := make(map[uint64][]string)
|
||||
for _, l2BatchDistribute := range l2BatchDistributes {
|
||||
if btypes.TxStatusType(l2BatchDistribute.TxStatus) == btypes.TxStatusBridgeBatchDistributeFailed {
|
||||
distributeFailedMap[l2BatchDistribute.BatchIndex] = append(distributeFailedMap[l2BatchDistribute.BatchIndex], l2BatchDistribute.Sender)
|
||||
}
|
||||
|
||||
if err := b.bridgeBatchDepositEventOrm.UpdateBatchEventStatus(ctx, l2BatchDistribute); err != nil {
|
||||
log.Error("failed to update L1 bridge batch distribute event", "batchIndex", l2BatchDistribute.BatchIndex, "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for batchIndex, distributeFailedSenders := range distributeFailedMap {
|
||||
if err := b.bridgeBatchDepositEventOrm.UpdateDistributeFailedStatus(ctx, batchIndex, distributeFailedSenders); err != nil {
|
||||
log.Error("failed to update L1 bridge batch distribute failed event", "batchIndex", batchIndex, "failed senders", distributeFailedSenders, "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// L2InsertOrUpdate inserts or updates L2 messages
|
||||
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages); err != nil {
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
btypes "scroll-tech/bridge-history-api/internal/types"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
@@ -35,20 +36,23 @@ const (
|
||||
|
||||
// HistoryLogic services.
|
||||
type HistoryLogic struct {
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
batchEventOrm *orm.BatchEvent
|
||||
redis *redis.Client
|
||||
singleFlight singleflight.Group
|
||||
cacheMetrics *cacheMetrics
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
batchEventOrm *orm.BatchEvent
|
||||
bridgeBatchDepositOrm *orm.BridgeBatchDepositEvent
|
||||
|
||||
redis *redis.Client
|
||||
singleFlight singleflight.Group
|
||||
cacheMetrics *cacheMetrics
|
||||
}
|
||||
|
||||
// NewHistoryLogic returns bridge history services.
|
||||
func NewHistoryLogic(db *gorm.DB, redis *redis.Client) *HistoryLogic {
|
||||
logic := &HistoryLogic{
|
||||
crossMessageOrm: orm.NewCrossMessage(db),
|
||||
batchEventOrm: orm.NewBatchEvent(db),
|
||||
redis: redis,
|
||||
cacheMetrics: initCacheMetrics(),
|
||||
crossMessageOrm: orm.NewCrossMessage(db),
|
||||
batchEventOrm: orm.NewBatchEvent(db),
|
||||
bridgeBatchDepositOrm: orm.NewBridgeBatchDepositEvent(db),
|
||||
redis: redis,
|
||||
cacheMetrics: initCacheMetrics(),
|
||||
}
|
||||
return logic
|
||||
}
|
||||
@@ -72,25 +76,28 @@ func (h *HistoryLogic) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, a
	log.Info("cache miss", "cache key", cacheKey)

	result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
		var messages []*orm.CrossMessage
		messages, err = h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
		if err != nil {
			return nil, err
		var txHistoryInfos []*types.TxHistoryInfo
		crossMessages, getErr := h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
		if getErr != nil {
			return nil, getErr
		}
		return messages, nil
		for _, message := range crossMessages {
			txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
		}
		return txHistoryInfos, nil
	})
	if err != nil {
		log.Error("failed to get L2 claimable withdrawals by address", "address", address, "error", err)
		return nil, 0, err
	}

	messages, ok := result.([]*orm.CrossMessage)
	txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
	if !ok {
		log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
		return nil, 0, errors.New("unexpected error")
	}

	return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
	return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
}
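
Every read path in HistoryLogic follows the same shape: compute a cache key, and on a miss let singleflight collapse concurrent identical queries into one ORM call before the rows are converted to []*types.TxHistoryInfo and cached. A stripped-down, self-contained sketch of that pattern; it omits the Redis layer, and the key format and fetch function are illustrative stand-ins, not the repository's code:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

// fetchFromDB stands in for an ORM query such as GetL2UnclaimedWithdrawalsByAddress.
func fetchFromDB(address string) ([]string, error) {
	fmt.Println("querying database for", address)
	return []string{"tx1", "tx2"}, nil
}

func getTxs(address string) ([]string, error) {
	cacheKey := "bridge-history-api:unclaimed:" + address // illustrative key format
	// Concurrent callers with the same key share a single fetchFromDB call.
	result, err, _ := group.Do(cacheKey, func() (interface{}, error) {
		return fetchFromDB(address)
	})
	if err != nil {
		return nil, err
	}
	txs, ok := result.([]string)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T", result)
	}
	return txs, nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			txs, _ := getTxs("0xabc")
			fmt.Println(txs)
		}()
	}
	wg.Wait()
}
```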
|
||||
|
||||
// GetL2WithdrawalsByAddress gets all withdrawal txs under given address.
|
||||
@@ -112,25 +119,28 @@ func (h *HistoryLogic) GetL2WithdrawalsByAddress(ctx context.Context, address st
|
||||
log.Info("cache miss", "cache key", cacheKey)
|
||||
|
||||
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
|
||||
var messages []*orm.CrossMessage
|
||||
messages, err = h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var txHistoryInfos []*types.TxHistoryInfo
|
||||
crossMessages, getErr := h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
|
||||
if getErr != nil {
|
||||
return nil, getErr
|
||||
}
|
||||
return messages, nil
|
||||
for _, message := range crossMessages {
|
||||
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
|
||||
}
|
||||
return txHistoryInfos, nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 withdrawals by address", "address", address, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
messages, ok := result.([]*orm.CrossMessage)
|
||||
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
|
||||
if !ok {
|
||||
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
|
||||
return nil, 0, errors.New("unexpected error")
|
||||
}
|
||||
|
||||
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
|
||||
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
|
||||
}
|
||||
|
||||
// GetTxsByAddress gets tx infos under given address.
|
||||
@@ -152,25 +162,36 @@ func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address string, page
|
||||
log.Info("cache miss", "cache key", cacheKey)
|
||||
|
||||
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
|
||||
var messages []*orm.CrossMessage
|
||||
messages, err = h.crossMessageOrm.GetTxsByAddress(ctx, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var txHistoryInfos []*types.TxHistoryInfo
|
||||
crossMessages, getErr := h.crossMessageOrm.GetTxsByAddress(ctx, address)
|
||||
if getErr != nil {
|
||||
return nil, getErr
|
||||
}
|
||||
return messages, nil
|
||||
for _, message := range crossMessages {
|
||||
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
|
||||
}
|
||||
|
||||
batchDepositMessages, getErr := h.bridgeBatchDepositOrm.GetTxsByAddress(ctx, address)
|
||||
if getErr != nil {
|
||||
return nil, getErr
|
||||
}
|
||||
for _, message := range batchDepositMessages {
|
||||
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
|
||||
}
|
||||
return txHistoryInfos, nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("failed to get txs by address", "address", address, "error", err)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
messages, ok := result.([]*orm.CrossMessage)
|
||||
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
|
||||
if !ok {
|
||||
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
|
||||
return nil, 0, errors.New("unexpected error")
|
||||
}
|
||||
|
||||
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
|
||||
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
|
||||
}
|
||||
|
||||
// GetTxsByHashes gets tx infos under given tx hashes.
|
||||
@@ -218,15 +239,24 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
|
||||
}
|
||||
|
||||
if len(uncachedHashes) > 0 {
|
||||
messages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
|
||||
var txHistories []*types.TxHistoryInfo
|
||||
|
||||
crossMessages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
|
||||
if err != nil {
|
||||
log.Error("failed to get messages by tx hashes", "hashes", uncachedHashes)
|
||||
log.Error("failed to get cross messages by tx hashes", "hashes", uncachedHashes)
|
||||
return nil, err
|
||||
}
|
||||
for _, message := range crossMessages {
|
||||
txHistories = append(txHistories, getTxHistoryInfoFromCrossMessage(message))
|
||||
}
|
||||
|
||||
var txHistories []*types.TxHistoryInfo
|
||||
for _, message := range messages {
|
||||
txHistories = append(txHistories, getTxHistoryInfo(message))
|
||||
batchDepositMessages, err := h.bridgeBatchDepositOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
|
||||
if err != nil {
|
||||
log.Error("failed to get batch deposit messages by tx hashes", "hashes", uncachedHashes)
|
||||
return nil, err
|
||||
}
|
||||
for _, message := range batchDepositMessages {
|
||||
txHistories = append(txHistories, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
|
||||
}
|
||||
|
||||
resultMap := make(map[string]*types.TxHistoryInfo)
|
||||
@@ -260,19 +290,19 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
|
||||
func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistoryInfo {
|
||||
txHistory := &types.TxHistoryInfo{
|
||||
MessageHash: message.MessageHash,
|
||||
TokenType: orm.TokenType(message.TokenType),
|
||||
TokenType: btypes.TokenType(message.TokenType),
|
||||
TokenIDs: utils.ConvertStringToStringArray(message.TokenIDs),
|
||||
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmounts),
|
||||
L1TokenAddress: message.L1TokenAddress,
|
||||
L2TokenAddress: message.L2TokenAddress,
|
||||
MessageType: orm.MessageType(message.MessageType),
|
||||
TxStatus: orm.TxStatusType(message.TxStatus),
|
||||
MessageType: btypes.MessageType(message.MessageType),
|
||||
TxStatus: btypes.TxStatusType(message.TxStatus),
|
||||
BlockTimestamp: message.BlockTimestamp,
|
||||
}
|
||||
if txHistory.MessageType == orm.MessageTypeL1SentMessage {
|
||||
if txHistory.MessageType == btypes.MessageTypeL1SentMessage {
|
||||
txHistory.Hash = message.L1TxHash
|
||||
txHistory.ReplayTxHash = message.L1ReplayTxHash
|
||||
txHistory.RefundTxHash = message.L1RefundTxHash
|
||||
@@ -288,7 +318,7 @@ func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
|
||||
Hash: message.L1TxHash,
|
||||
BlockNumber: message.L1BlockNumber,
|
||||
}
|
||||
if orm.RollupStatusType(message.RollupStatus) == orm.RollupStatusTypeFinalized {
|
||||
if btypes.RollupStatusType(message.RollupStatus) == btypes.RollupStatusTypeFinalized {
|
||||
txHistory.ClaimInfo = &types.ClaimInfo{
|
||||
From: message.MessageFrom,
|
||||
To: message.MessageTo,
|
||||
@@ -306,6 +336,28 @@ func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
|
||||
return txHistory
|
||||
}
|
||||
|
||||
func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepositEvent) *types.TxHistoryInfo {
|
||||
txHistory := &types.TxHistoryInfo{
|
||||
Hash: message.L1TxHash,
|
||||
TokenType: btypes.TokenType(message.TokenType),
|
||||
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmount),
|
||||
BlockNumber: message.L1BlockNumber,
|
||||
MessageType: btypes.MessageTypeL1BatchDeposit,
|
||||
TxStatus: btypes.TxStatusType(message.TxStatus),
|
||||
CounterpartChainTx: &types.CounterpartChainTx{
|
||||
Hash: message.L2TxHash,
|
||||
BlockNumber: message.L2BlockNumber,
|
||||
},
|
||||
BlockTimestamp: message.BlockTimestamp,
|
||||
BatchDepositFee: message.Fee,
|
||||
}
|
||||
if txHistory.TokenType != btypes.TokenTypeETH {
|
||||
txHistory.L1TokenAddress = message.L1TokenAddress
|
||||
txHistory.L2TokenAddress = message.L2TokenAddress
|
||||
}
|
||||
return txHistory
|
||||
}
|
||||
|
||||
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
	start := int64((pageNum - 1) * pageSize)
	end := start + int64(pageSize) - 1
@@ -320,7 +372,7 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
		return nil, 0, false, nil
	}

	values, err := h.redis.ZRange(ctx, cacheKey, start, end).Result()
	values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
	if err != nil {
		log.Error("failed to get zrange result", "error", err)
		return nil, 0, false, err
@@ -356,13 +408,13 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
	}
	} else {
		// The transactions are sorted, thus we set the score as their indices.
		for i, tx := range txs {
		for _, tx := range txs {
			txBytes, err := json.Marshal(tx)
			if err != nil {
				log.Error("failed to marshal transaction to json", "error", err)
				return err
			}
			if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(i), Member: txBytes}).Err(); err != nil {
			if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(tx.BlockTimestamp), Member: txBytes}).Err(); err != nil {
				log.Error("failed to add transaction to sorted set", "error", err)
				return err
			}
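
Two related changes are visible here: reads now use ZRevRange so pages come back newest first, and writes score each member by its block timestamp rather than its slice index, which keeps the ordering stable even when cross messages and batch deposits are merged from separate queries. A minimal sketch of the resulting scheme, assuming the go-redis v8-style client used above and the types.TxHistoryInfo shape with a uint64 BlockTimestamp field; imports ("context", "encoding/json", "github.com/go-redis/redis/v8") are assumed.

func cacheAndReadPage(ctx context.Context, rdb *redis.Client, key string, txs []*types.TxHistoryInfo, page, pageSize int64) ([]string, error) {
	pipe := rdb.Pipeline()
	for _, tx := range txs {
		b, err := json.Marshal(tx)
		if err != nil {
			return nil, err
		}
		// Score by block timestamp so ordering no longer depends on insertion order.
		pipe.ZAdd(ctx, key, &redis.Z{Score: float64(tx.BlockTimestamp), Member: b})
	}
	if _, err := pipe.Exec(ctx); err != nil {
		return nil, err
	}
	// Newest first: the highest timestamps come back at the start of page 1.
	start := (page - 1) * pageSize
	return rdb.ZRevRange(ctx, key, start, start+pageSize-1).Result()
}
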
@@ -381,12 +433,7 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, messages []*orm.CrossMessage, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
|
||||
var txHistories []*types.TxHistoryInfo
|
||||
for _, message := range messages {
|
||||
txHistories = append(txHistories, getTxHistoryInfo(message))
|
||||
}
|
||||
|
||||
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, txHistories []*types.TxHistoryInfo, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
|
||||
err := h.cacheTxsInfo(ctx, cacheKey, txHistories)
|
||||
if err != nil {
|
||||
log.Error("failed to cache txs info", "key", cacheKey, "err", err)
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
btypes "scroll-tech/bridge-history-api/internal/types"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
@@ -30,8 +31,60 @@ func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1Ev
|
||||
}
|
||||
}
|
||||
|
||||
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
|
||||
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
// ParseL1CrossChainEventLogs parse l1 cross chain event logs
|
||||
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
|
||||
l1CrossChainDepositMessages, l1CrossChainRelayedMessages, err := e.ParseL1SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
l1BridgeBatchDepositMessages, err := e.ParseL1BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
return l1CrossChainDepositMessages, l1CrossChainRelayedMessages, l1BridgeBatchDepositMessages, nil
|
||||
}
|
||||
|
||||
// ParseL1BridgeBatchDepositCrossChainEventLogs parse L1 watched batch bridge cross chain events.
func (e *L1EventParser) ParseL1BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
	var l1BridgeBatchDepositMessages []*orm.BridgeBatchDepositEvent
	for _, vlog := range logs {
		switch vlog.Topics[0] {
		case backendabi.L1BridgeBatchDepositSig:
			event := backendabi.L1BatchBridgeGatewayDeposit{}
			if err := utils.UnpackLog(backendabi.L1BatchBridgeGatewayABI, &event, "Deposit", vlog); err != nil {
				log.Error("Failed to unpack batch bridge gateway deposit event", "err", err)
				return nil, err
			}

			var tokenType btypes.TokenType
			if event.Token == common.HexToAddress("0") {
				tokenType = btypes.TokenTypeETH
			} else {
				tokenType = btypes.TokenTypeERC20
			}

			l1BridgeBatchDepositMessages = append(l1BridgeBatchDepositMessages, &orm.BridgeBatchDepositEvent{
				TokenType:      int(tokenType),
				Sender:         event.Sender.String(),
				BatchIndex:     event.BatchIndex.Uint64(),
				TokenAmount:    event.Amount.String(),
				Fee:            event.Fee.String(),
				L1TokenAddress: event.Token.String(),
				L1BlockNumber:  vlog.BlockNumber,
				L1TxHash:       vlog.TxHash.String(),
				TxStatus:       int(btypes.TxStatusBridgeBatchDeposit),
				BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
				L1LogIndex:     vlog.Index,
			})
		}
	}
	return l1BridgeBatchDepositMessages, nil
}
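
The parser above leans on the package-local utils.UnpackLog helper. A hedged sketch of how such a helper is commonly written on top of the abi package follows; it assumes the scroll-tech go-ethereum fork keeps upstream's UnpackIntoInterface and ParseTopics signatures, and it is not the repository implementation. Note also that common.HexToAddress("0") parses to the zero address, so the token-type branch above is simply an ETH-vs-ERC20 sentinel check; comparing against common.Address{} is the equivalent, more idiomatic spelling.

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/core/types"
)

// unpackLog is a sketch of a generic event-unpacking helper.
func unpackLog(contractABI abi.ABI, out interface{}, eventName string, vlog types.Log) error {
	ev, ok := contractABI.Events[eventName]
	if !ok || vlog.Topics[0] != ev.ID {
		return fmt.Errorf("log does not match event %q", eventName)
	}
	// Non-indexed fields are ABI-encoded in the data payload.
	if len(vlog.Data) > 0 {
		if err := contractABI.UnpackIntoInterface(out, eventName, vlog.Data); err != nil {
			return err
		}
	}
	// Indexed fields are carried in the remaining topics.
	var indexed abi.Arguments
	for _, arg := range ev.Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopics(out, indexed, vlog.Topics[1:])
}
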
// ParseL1SingleCrossChainEventLogs parses L1 watched single cross chain events.
|
||||
func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l1DepositMessages []*orm.CrossMessage
|
||||
var l1RelayedMessages []*orm.CrossMessage
|
||||
for _, vlog := range logs {
|
||||
@@ -45,7 +98,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeETH)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeETH)
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
case backendabi.L1DepositERC20Sig:
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
@@ -57,7 +110,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC20)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC20)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
@@ -70,7 +123,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC721)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC721)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = event.TokenID.String()
|
||||
@@ -83,7 +136,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC721)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC721)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
|
||||
@@ -96,7 +149,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC1155)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = event.TokenID.String()
|
||||
@@ -110,7 +163,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC1155)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
|
||||
@@ -130,12 +183,12 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
Sender: from,
|
||||
Receiver: event.Target.String(),
|
||||
TokenType: int(orm.TokenTypeETH),
|
||||
TokenType: int(btypes.TokenTypeETH),
|
||||
L1TxHash: vlog.TxHash.String(),
|
||||
TokenAmounts: event.Value.String(),
|
||||
MessageNonce: event.MessageNonce.Uint64(),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
TxStatus: int(orm.TxStatusTypeSent),
|
||||
MessageType: int(btypes.MessageTypeL1SentMessage),
|
||||
TxStatus: int(btypes.TxStatusTypeSent),
|
||||
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
|
||||
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
|
||||
})
|
||||
@@ -149,8 +202,8 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
MessageHash: event.MessageHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
L1TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(orm.TxStatusTypeRelayed),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
TxStatus: int(btypes.TxStatusTypeRelayed),
|
||||
MessageType: int(btypes.MessageTypeL2SentMessage),
|
||||
})
|
||||
case backendabi.L1FailedRelayedMessageEventSig:
|
||||
event := backendabi.L1FailedRelayedMessageEvent{}
|
||||
@@ -162,8 +215,8 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
|
||||
MessageHash: event.MessageHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
L1TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(orm.TxStatusTypeFailedRelayed),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
|
||||
MessageType: int(btypes.MessageTypeL2SentMessage),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -192,7 +245,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
BatchStatus: int(orm.BatchStatusTypeCommitted),
|
||||
BatchStatus: int(btypes.BatchStatusTypeCommitted),
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
BatchHash: event.BatchHash.String(),
|
||||
StartBlockNumber: startBlock,
|
||||
@@ -206,7 +259,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
BatchStatus: int(orm.BatchStatusTypeReverted),
|
||||
BatchStatus: int(btypes.BatchStatusTypeReverted),
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
BatchHash: event.BatchHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
@@ -218,7 +271,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
BatchStatus: int(orm.BatchStatusTypeFinalized),
|
||||
BatchStatus: int(btypes.BatchStatusTypeFinalized),
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
BatchHash: event.BatchHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
@@ -248,7 +301,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
// If the message hash is not found in the map, it's not a replayMessage or enforced tx (omitted); add it to the events.
|
||||
if _, exists := messageHashes[messageHash]; !exists {
|
||||
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
|
||||
EventType: orm.MessageQueueEventTypeQueueTransaction,
|
||||
EventType: btypes.MessageQueueEventTypeQueueTransaction,
|
||||
QueueIndex: event.QueueIndex,
|
||||
MessageHash: messageHash,
|
||||
TxHash: vlog.TxHash,
|
||||
@@ -263,7 +316,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
|
||||
for _, index := range skippedIndices {
|
||||
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
|
||||
EventType: orm.MessageQueueEventTypeDequeueTransaction,
|
||||
EventType: btypes.MessageQueueEventTypeDequeueTransaction,
|
||||
QueueIndex: index,
|
||||
})
|
||||
}
|
||||
@@ -274,7 +327,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
return nil, err
|
||||
}
|
||||
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
|
||||
EventType: orm.MessageQueueEventTypeDropTransaction,
|
||||
EventType: btypes.MessageQueueEventTypeDropTransaction,
|
||||
QueueIndex: event.Index.Uint64(),
|
||||
TxHash: vlog.TxHash,
|
||||
})
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
btypes "scroll-tech/bridge-history-api/internal/types"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
@@ -25,11 +26,12 @@ const L1ReorgSafeDepth = 64
|
||||
|
||||
// L1FilterResult L1 fetcher result
|
||||
type L1FilterResult struct {
|
||||
DepositMessages []*orm.CrossMessage
|
||||
RelayedMessages []*orm.CrossMessage
|
||||
BatchEvents []*orm.BatchEvent
|
||||
MessageQueueEvents []*orm.MessageQueueEvent
|
||||
RevertedTxs []*orm.CrossMessage
|
||||
DepositMessages []*orm.CrossMessage
|
||||
RelayedMessages []*orm.CrossMessage
|
||||
BatchEvents []*orm.BatchEvent
|
||||
MessageQueueEvents []*orm.MessageQueueEvent
|
||||
RevertedTxs []*orm.CrossMessage
|
||||
BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
|
||||
}
|
||||
|
||||
// L1FetcherLogic the L1 fetcher logic
|
||||
@@ -82,7 +84,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
|
||||
common.HexToAddress(cfg.GatewayRouterAddr),
|
||||
}
|
||||
|
||||
// Optional erc20 gateways.
|
||||
// Optional gateways.
|
||||
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
@@ -98,6 +100,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
|
||||
}
|
||||
|
||||
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
|
||||
|
||||
f := &L1FetcherLogic{
|
||||
@@ -183,12 +190,12 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
|
||||
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
|
||||
L1TxHash: tx.Hash().String(),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
MessageType: int(btypes.MessageTypeL1SentMessage),
|
||||
Sender: sender.String(),
|
||||
Receiver: (*tx.To()).String(),
|
||||
L1BlockNumber: receipt.BlockNumber.Uint64(),
|
||||
BlockTimestamp: block.Time(),
|
||||
TxStatus: int(orm.TxStatusTypeSentTxReverted),
|
||||
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -203,7 +210,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
|
||||
query.Topics[0] = make([]common.Hash, 13)
|
||||
query.Topics[0] = make([]common.Hash, 14)
|
||||
query.Topics[0][0] = backendabi.L1DepositETHSig
|
||||
query.Topics[0][1] = backendabi.L1DepositERC20Sig
|
||||
query.Topics[0][2] = backendabi.L1DepositERC721Sig
|
||||
@@ -217,6 +224,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
|
||||
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
|
||||
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
|
||||
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
|
||||
query.Topics[0][13] = backendabi.L1BridgeBatchDepositSig
|
||||
|
||||
eventLogs, err := f.client.FilterLogs(ctx, query)
|
||||
if err != nil {
|
||||
@@ -252,7 +260,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
|
||||
l1DepositMessages, l1RelayedMessages, l1BridgeBatchDepositMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
@@ -271,11 +279,12 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
}
|
||||
|
||||
res := L1FilterResult{
|
||||
DepositMessages: l1DepositMessages,
|
||||
RelayedMessages: l1RelayedMessages,
|
||||
BatchEvents: l1BatchEvents,
|
||||
MessageQueueEvents: l1MessageQueueEvents,
|
||||
RevertedTxs: l1RevertedTxs,
|
||||
DepositMessages: l1DepositMessages,
|
||||
RelayedMessages: l1RelayedMessages,
|
||||
BatchEvents: l1BatchEvents,
|
||||
MessageQueueEvents: l1MessageQueueEvents,
|
||||
RevertedTxs: l1RevertedTxs,
|
||||
BridgeBatchDepositEvents: l1BridgeBatchDepositMessages,
|
||||
}
|
||||
|
||||
f.updateMetrics(res)
|
||||
@@ -287,23 +296,23 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_gateway_router_transaction").Add(float64(len(res.RevertedTxs)))
|
||||
|
||||
for _, depositMessage := range res.DepositMessages {
|
||||
switch orm.TokenType(depositMessage.TokenType) {
|
||||
case orm.TokenTypeETH:
|
||||
switch btypes.TokenType(depositMessage.TokenType) {
|
||||
case btypes.TokenTypeETH:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_eth").Add(1)
|
||||
case orm.TokenTypeERC20:
|
||||
case btypes.TokenTypeERC20:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc20").Add(1)
|
||||
case orm.TokenTypeERC721:
|
||||
case btypes.TokenTypeERC721:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc721").Add(1)
|
||||
case orm.TokenTypeERC1155:
|
||||
case btypes.TokenTypeERC1155:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc1155").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
for _, relayedMessage := range res.RelayedMessages {
|
||||
switch orm.TxStatusType(relayedMessage.TxStatus) {
|
||||
case orm.TxStatusTypeRelayed:
|
||||
switch btypes.TxStatusType(relayedMessage.TxStatus) {
|
||||
case btypes.TxStatusTypeRelayed:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_relayed_message").Add(1)
|
||||
case orm.TxStatusTypeFailedRelayed:
|
||||
case btypes.TxStatusTypeFailedRelayed:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_relayed_message").Add(1)
|
||||
}
|
||||
// Have not tracked L1 relayed message reverted transaction yet.
|
||||
@@ -312,24 +321,33 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
|
||||
}
|
||||
|
||||
for _, batchEvent := range res.BatchEvents {
|
||||
switch orm.BatchStatusType(batchEvent.BatchStatus) {
|
||||
case orm.BatchStatusTypeCommitted:
|
||||
switch btypes.BatchStatusType(batchEvent.BatchStatus) {
|
||||
case btypes.BatchStatusTypeCommitted:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_commit_batch_event").Add(1)
|
||||
case orm.BatchStatusTypeReverted:
|
||||
case btypes.BatchStatusTypeReverted:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_revert_batch_event").Add(1)
|
||||
case orm.BatchStatusTypeFinalized:
|
||||
case btypes.BatchStatusTypeFinalized:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_finalize_batch_event").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
for _, messageQueueEvent := range res.MessageQueueEvents {
|
||||
switch messageQueueEvent.EventType {
|
||||
case orm.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
|
||||
case btypes.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_replay_message_or_enforced_transaction").Add(1)
|
||||
case orm.MessageQueueEventTypeDequeueTransaction:
|
||||
case btypes.MessageQueueEventTypeDequeueTransaction:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
|
||||
case orm.MessageQueueEventTypeDropTransaction:
|
||||
case btypes.MessageQueueEventTypeDropTransaction:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
for _, bridgeBatchDepositEvent := range res.BridgeBatchDepositEvents {
|
||||
switch btypes.TokenType(bridgeBatchDepositEvent.TokenType) {
|
||||
case btypes.TokenTypeETH:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_eth").Add(1)
|
||||
case btypes.TokenTypeERC20:
|
||||
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_erc20").Add(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package logic
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
@@ -11,6 +12,7 @@ import (
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
btypes "scroll-tech/bridge-history-api/internal/types"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
@@ -28,8 +30,72 @@ func NewL2EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L2Ev
|
||||
}
|
||||
}
|
||||
|
||||
// ParseL2EventLogs parses L2 watched events
|
||||
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
// ParseL2EventLogs parses L2 watchedevents
|
||||
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
|
||||
l2WithdrawMessages, l2RelayedMessages, err := e.ParseL2SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
l2BridgeBatchDepositMessages, err := e.ParseL2BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, nil
|
||||
}
|
||||
|
||||
// ParseL2BridgeBatchDepositCrossChainEventLogs parses L2 watched bridge batch deposit events
|
||||
func (e *L2EventParser) ParseL2BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
|
||||
var l2BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
case backendabi.L2BridgeBatchDistributeSig:
|
||||
event := backendabi.L2BatchBridgeGatewayBatchDistribute{}
|
||||
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "BatchDistribute", vlog)
|
||||
if err != nil {
|
||||
log.Error("Failed to unpack BatchDistribute event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tokenType btypes.TokenType
|
||||
if event.L1Token == common.HexToAddress("0") {
|
||||
tokenType = btypes.TokenTypeETH
|
||||
} else {
|
||||
tokenType = btypes.TokenTypeERC20
|
||||
}
|
||||
|
||||
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
|
||||
TokenType: int(tokenType),
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
L2TokenAddress: event.L2Token.String(),
|
||||
L2BlockNumber: vlog.BlockNumber,
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(btypes.TxStatusBridgeBatchDistribute),
|
||||
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
|
||||
})
|
||||
case backendabi.L2BridgeBatchDistributeFailedSig:
|
||||
event := backendabi.L2BatchBridgeGatewayDistributeFailed{}
|
||||
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "DistributeFailed", vlog)
|
||||
if err != nil {
|
||||
log.Error("Failed to unpack DistributeFailed event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
L2TokenAddress: event.L2Token.String(),
|
||||
L2BlockNumber: vlog.BlockNumber,
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(btypes.TxStatusBridgeBatchDistributeFailed),
|
||||
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
|
||||
Sender: event.Receiver.String(),
|
||||
})
|
||||
}
|
||||
}
|
||||
return l2BridgeBatchDepositEvents, nil
|
||||
}
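
The L2 side only parses Distribute and DistributeFailed events here; the rows they refer to were created earlier by the L1 parser. A hypothetical wiring sketch of how the two halves meet, using only the ORM methods introduced later in this diff (UpdateBatchEventStatus and UpdateDistributeFailedStatus); the surrounding event-update loop is assumed and is not shown in these commits.

for _, ev := range l2BridgeBatchDepositEvents {
	switch btypes.TxStatusType(ev.TxStatus) {
	case btypes.TxStatusBridgeBatchDistribute:
		// A successful distribute finalizes every deposit row of that batch and token type.
		if err := bridgeBatchDepositOrm.UpdateBatchEventStatus(ctx, ev); err != nil {
			return err
		}
	case btypes.TxStatusBridgeBatchDistributeFailed:
		// A failed distribute only flips the rows of the affected sender(s).
		if err := bridgeBatchDepositOrm.UpdateDistributeFailedStatus(ctx, ev.BatchIndex, []string{ev.Sender}); err != nil {
			return err
		}
	}
}
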
|
||||
|
||||
// ParseL2SingleCrossChainEventLogs parses L2 watched events
|
||||
func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l2WithdrawMessages []*orm.CrossMessage
|
||||
var l2RelayedMessages []*orm.CrossMessage
|
||||
for _, vlog := range logs {
|
||||
@@ -44,7 +110,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeETH)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeETH)
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
case backendabi.L2WithdrawERC20Sig:
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
@@ -56,7 +122,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC20)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC20)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenAmounts = event.Amount.String()
|
||||
@@ -70,7 +136,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC721)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC721)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = event.TokenID.String()
|
||||
@@ -84,7 +150,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC721)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC721)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
|
||||
@@ -98,7 +164,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC1155)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = event.TokenID.String()
|
||||
@@ -113,7 +179,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
lastMessage.Sender = event.From.String()
|
||||
lastMessage.Receiver = event.To.String()
|
||||
lastMessage.TokenType = int(orm.TokenTypeERC1155)
|
||||
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
|
||||
lastMessage.L1TokenAddress = event.L1Token.String()
|
||||
lastMessage.L2TokenAddress = event.L2Token.String()
|
||||
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
|
||||
@@ -134,7 +200,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
|
||||
Sender: from,
|
||||
Receiver: event.Target.String(),
|
||||
TokenType: int(orm.TokenTypeETH),
|
||||
TokenType: int(btypes.TokenTypeETH),
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
TokenAmounts: event.Value.String(),
|
||||
MessageFrom: event.Sender.String(),
|
||||
@@ -142,8 +208,8 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
MessageValue: event.Value.String(),
|
||||
MessageNonce: event.MessageNonce.Uint64(),
|
||||
MessageData: hexutil.Encode(event.Message),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
TxStatus: int(orm.TxStatusTypeSent),
|
||||
MessageType: int(btypes.MessageTypeL2SentMessage),
|
||||
TxStatus: int(btypes.TxStatusTypeSent),
|
||||
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
|
||||
L2BlockNumber: vlog.BlockNumber,
|
||||
})
|
||||
@@ -158,8 +224,8 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
MessageHash: event.MessageHash.String(),
|
||||
L2BlockNumber: vlog.BlockNumber,
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(orm.TxStatusTypeRelayed),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
TxStatus: int(btypes.TxStatusTypeRelayed),
|
||||
MessageType: int(btypes.MessageTypeL1SentMessage),
|
||||
})
|
||||
case backendabi.L2FailedRelayedMessageEventSig:
|
||||
event := backendabi.L2RelayedMessageEvent{}
|
||||
@@ -172,8 +238,8 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
|
||||
MessageHash: event.MessageHash.String(),
|
||||
L2BlockNumber: vlog.BlockNumber,
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
TxStatus: int(orm.TxStatusTypeFailedRelayed),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
|
||||
MessageType: int(btypes.MessageTypeL1SentMessage),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
btypes "scroll-tech/bridge-history-api/internal/types"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
@@ -26,9 +27,10 @@ const L2ReorgSafeDepth = 256
|
||||
|
||||
// L2FilterResult the L2 filter result
|
||||
type L2FilterResult struct {
|
||||
WithdrawMessages []*orm.CrossMessage
|
||||
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
|
||||
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
|
||||
WithdrawMessages []*orm.CrossMessage
|
||||
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
|
||||
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
|
||||
BridgeBatchDepositMessage []*orm.BridgeBatchDepositEvent
|
||||
}
|
||||
|
||||
// L2FetcherLogic the L2 fetcher logic
|
||||
@@ -77,7 +79,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
|
||||
common.HexToAddress(cfg.GatewayRouterAddr),
|
||||
}
|
||||
|
||||
// Optional erc20 gateways.
|
||||
// Optional gateways.
|
||||
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
@@ -93,6 +95,11 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
|
||||
}
|
||||
|
||||
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
|
||||
|
||||
f := &L2FetcherLogic{
|
||||
@@ -164,9 +171,9 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
l2RevertedRelayedMessageTxs = append(l2RevertedRelayedMessageTxs, &orm.CrossMessage{
|
||||
MessageHash: common.BytesToHash(crypto.Keccak256(tx.AsL1MessageTx().Data)).String(),
|
||||
L2TxHash: tx.Hash().String(),
|
||||
TxStatus: int(orm.TxStatusTypeRelayTxReverted),
|
||||
TxStatus: int(btypes.TxStatusTypeRelayTxReverted),
|
||||
L2BlockNumber: receipt.BlockNumber.Uint64(),
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
MessageType: int(btypes.MessageTypeL1SentMessage),
|
||||
})
|
||||
}
|
||||
continue
|
||||
@@ -194,12 +201,12 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
|
||||
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
|
||||
L2TxHash: tx.Hash().String(),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
MessageType: int(btypes.MessageTypeL2SentMessage),
|
||||
Sender: sender.String(),
|
||||
Receiver: (*tx.To()).String(),
|
||||
L2BlockNumber: receipt.BlockNumber.Uint64(),
|
||||
BlockTimestamp: block.Time(),
|
||||
TxStatus: int(orm.TxStatusTypeSentTxReverted),
|
||||
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -214,7 +221,7 @@ func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]
|
||||
Addresses: f.addressList,
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
query.Topics[0] = make([]common.Hash, 7)
|
||||
query.Topics[0] = make([]common.Hash, 9)
|
||||
query.Topics[0][0] = backendabi.L2WithdrawETHSig
|
||||
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
|
||||
query.Topics[0][2] = backendabi.L2WithdrawERC721Sig
|
||||
@@ -222,6 +229,8 @@ func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]
|
||||
query.Topics[0][4] = backendabi.L2SentMessageEventSig
|
||||
query.Topics[0][5] = backendabi.L2RelayedMessageEventSig
|
||||
query.Topics[0][6] = backendabi.L2FailedRelayedMessageEventSig
|
||||
query.Topics[0][7] = backendabi.L2BridgeBatchDistributeSig
|
||||
query.Topics[0][8] = backendabi.L2BridgeBatchDistributeFailedSig
|
||||
|
||||
eventLogs, err := f.client.FilterLogs(ctx, query)
|
||||
if err != nil {
|
||||
@@ -257,16 +266,17 @@ func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
|
||||
l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
res := L2FilterResult{
|
||||
WithdrawMessages: l2WithdrawMessages,
|
||||
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
|
||||
OtherRevertedTxs: revertedUserTxs,
|
||||
WithdrawMessages: l2WithdrawMessages,
|
||||
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
|
||||
OtherRevertedTxs: revertedUserTxs,
|
||||
BridgeBatchDepositMessage: l2BridgeBatchDepositMessages,
|
||||
}
|
||||
|
||||
f.updateMetrics(res)
|
||||
@@ -278,28 +288,37 @@ func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_gateway_router_transaction").Add(float64(len(res.OtherRevertedTxs)))
|
||||
|
||||
for _, withdrawMessage := range res.WithdrawMessages {
|
||||
switch orm.TokenType(withdrawMessage.TokenType) {
|
||||
case orm.TokenTypeETH:
|
||||
switch btypes.TokenType(withdrawMessage.TokenType) {
|
||||
case btypes.TokenTypeETH:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_eth").Add(1)
|
||||
case orm.TokenTypeERC20:
|
||||
case btypes.TokenTypeERC20:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc20").Add(1)
|
||||
case orm.TokenTypeERC721:
|
||||
case btypes.TokenTypeERC721:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc721").Add(1)
|
||||
case orm.TokenTypeERC1155:
|
||||
case btypes.TokenTypeERC1155:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc1155").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
for _, relayedMessage := range res.RelayedMessages {
|
||||
switch orm.TxStatusType(relayedMessage.TxStatus) {
|
||||
case orm.TxStatusTypeRelayed:
|
||||
switch btypes.TxStatusType(relayedMessage.TxStatus) {
|
||||
case btypes.TxStatusTypeRelayed:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_relayed_message").Add(1)
|
||||
case orm.TxStatusTypeFailedRelayed:
|
||||
case btypes.TxStatusTypeFailedRelayed:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_relayed_message").Add(1)
|
||||
case orm.TxStatusTypeRelayTxReverted:
|
||||
case btypes.TxStatusTypeRelayTxReverted:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_reverted_relayed_message_transaction").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
for _, bridgeBatchDepositMessage := range res.BridgeBatchDepositMessage {
|
||||
switch btypes.TxStatusType(bridgeBatchDepositMessage.TxStatus) {
|
||||
case btypes.TxStatusBridgeBatchDistribute:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_message").Add(1)
|
||||
case btypes.TxStatusBridgeBatchDistributeFailed:
|
||||
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_failed_message").Add(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isTransactionToGateway(tx *types.Transaction, gatewayList []common.Address) bool {
|
||||
|
||||
@@ -7,26 +7,8 @@ import (
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// BatchStatusType represents the type of batch status.
|
||||
type BatchStatusType int
|
||||
|
||||
// Constants for BatchStatusType.
|
||||
const (
|
||||
BatchStatusTypeUnknown BatchStatusType = iota
|
||||
BatchStatusTypeCommitted
|
||||
BatchStatusTypeReverted
|
||||
BatchStatusTypeFinalized
|
||||
)
|
||||
|
||||
// UpdateStatusType represents the whether batch info is updated in message table.
|
||||
type UpdateStatusType int
|
||||
|
||||
// Constants for UpdateStatusType.
|
||||
const (
|
||||
UpdateStatusTypeUnupdated UpdateStatusType = iota
|
||||
UpdateStatusTypeUpdated
|
||||
btypes "scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// BatchEvent represents a batch event.
|
||||
@@ -77,8 +59,8 @@ func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, block
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&BatchEvent{})
|
||||
db = db.Where("end_block_number <= ?", blockHeight)
|
||||
db = db.Where("batch_status = ?", BatchStatusTypeFinalized)
|
||||
db = db.Where("update_status = ?", UpdateStatusTypeUnupdated)
|
||||
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
|
||||
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
|
||||
db = db.Order("batch_index asc")
|
||||
if err := db.Find(&batches).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
@@ -96,8 +78,8 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&BatchEvent{})
|
||||
updateFields := make(map[string]interface{})
|
||||
switch BatchStatusType(l1BatchEvent.BatchStatus) {
|
||||
case BatchStatusTypeCommitted:
|
||||
switch btypes.BatchStatusType(l1BatchEvent.BatchStatus) {
|
||||
case btypes.BatchStatusTypeCommitted:
|
||||
// Use the clause to either insert or ignore on conflict
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "batch_hash"}},
|
||||
@@ -106,17 +88,17 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
|
||||
if err := db.Create(l1BatchEvent).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
|
||||
}
|
||||
case BatchStatusTypeFinalized:
|
||||
case btypes.BatchStatusTypeFinalized:
|
||||
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
|
||||
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
|
||||
updateFields["batch_status"] = BatchStatusTypeFinalized
|
||||
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update batch event, error: %w", err)
|
||||
}
|
||||
case BatchStatusTypeReverted:
|
||||
case btypes.BatchStatusTypeReverted:
|
||||
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
|
||||
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
|
||||
updateFields["batch_status"] = BatchStatusTypeReverted
|
||||
updateFields["batch_status"] = btypes.BatchStatusTypeReverted
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update batch event, error: %w", err)
|
||||
}
|
||||
@@ -135,7 +117,7 @@ func (c *BatchEvent) UpdateBatchEventStatus(ctx context.Context, batchIndex uint
|
||||
db = db.Model(&BatchEvent{})
|
||||
db = db.Where("batch_index = ?", batchIndex)
|
||||
updateFields := map[string]interface{}{
|
||||
"update_status": UpdateStatusTypeUpdated,
|
||||
"update_status": btypes.UpdateStatusTypeUpdated,
|
||||
}
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update batch event status, batchIndex: %d, error: %w", batchIndex, err)
|
||||
|
||||
163
bridge-history-api/internal/orm/bridge_batch_deposit.go
Normal file
163
bridge-history-api/internal/orm/bridge_batch_deposit.go
Normal file
@@ -0,0 +1,163 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// BridgeBatchDepositEvent represents the bridge batch deposit event.
|
||||
type BridgeBatchDepositEvent struct {
|
||||
db *gorm.DB `gorm:"column:-"`
|
||||
|
||||
ID uint64 `json:"id" gorm:"column:id;primary_key"`
|
||||
TokenType int `json:"token_type" gorm:"column:token_type"`
|
||||
Sender string `json:"sender" gorm:"column:sender"`
|
||||
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
|
||||
TokenAmount string `json:"token_amount" gorm:"column:token_amount"`
|
||||
Fee string `json:"fee" gorm:"column:fee"`
|
||||
L1TokenAddress string `json:"l1_token_address" gorm:"column:l1_token_address"`
|
||||
L2TokenAddress string `json:"l2_token_address" gorm:"column:l2_token_address"`
|
||||
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
|
||||
L2BlockNumber uint64 `json:"l2_block_number" gorm:"column:l2_block_number"`
|
||||
L1TxHash string `json:"l1_tx_hash" gorm:"column:l1_tx_hash"`
|
||||
L1LogIndex uint `json:"l1_log_index" gorm:"column:l1_log_index"`
|
||||
L2TxHash string `json:"l2_tx_hash" gorm:"column:l2_tx_hash"`
|
||||
TxStatus int `json:"tx_status" gorm:"column:tx_status"`
|
||||
BlockTimestamp uint64 `json:"block_timestamp" gorm:"column:block_timestamp"`
|
||||
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
|
||||
}
|
||||
|
||||
// TableName returns the table name for the BridgeBatchDepositEvent model.
|
||||
func (*BridgeBatchDepositEvent) TableName() string {
|
||||
return "bridge_batch_deposit_event_v2"
|
||||
}
|
||||
|
||||
// NewBridgeBatchDepositEvent returns a new instance of BridgeBatchDepositEvent.
|
||||
func NewBridgeBatchDepositEvent(db *gorm.DB) *BridgeBatchDepositEvent {
|
||||
return &BridgeBatchDepositEvent{db: db}
|
||||
}
|
||||
|
||||
// GetTxsByAddress returns the txs by address
|
||||
func (c *BridgeBatchDepositEvent) GetTxsByAddress(ctx context.Context, sender string) ([]*BridgeBatchDepositEvent, error) {
|
||||
var messages []*BridgeBatchDepositEvent
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&BridgeBatchDepositEvent{})
|
||||
db = db.Where("sender = ?", sender)
|
||||
db = db.Order("block_timestamp desc")
|
||||
db = db.Limit(500)
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to get all txs by sender address, sender: %v, error: %w", sender, err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// GetMessagesByTxHashes retrieves all BridgeBatchDepositEvent from the database that match the provided transaction hashes.
|
||||
func (c *BridgeBatchDepositEvent) GetMessagesByTxHashes(ctx context.Context, txHashes []string) ([]*BridgeBatchDepositEvent, error) {
|
||||
var messages []*BridgeBatchDepositEvent
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&BridgeBatchDepositEvent{})
|
||||
db = db.Where("l1_tx_hash in (?) or l2_tx_hash in (?)", txHashes, txHashes)
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to GetMessagesByTxHashes by tx hashes, tx hashes: %v, error: %w", txHashes, err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// GetMessageL1SyncedHeightInDB returns the l1 latest bridge batch deposit message height from the database
|
||||
func (c *BridgeBatchDepositEvent) GetMessageL1SyncedHeightInDB(ctx context.Context) (uint64, error) {
|
||||
var message BridgeBatchDepositEvent
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&BridgeBatchDepositEvent{})
|
||||
db = db.Order("l1_block_number desc")
|
||||
|
||||
err := db.First(&message).Error
|
||||
if err != nil && errors.Is(gorm.ErrRecordNotFound, err) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get l1 latest processed height, error: %w", err)
|
||||
}
|
||||
|
||||
return message.L1BlockNumber, nil
|
||||
}
|
||||
|
||||
// GetMessageL2SyncedHeightInDB returns the l2 latest bridge batch deposit message height from the database
|
||||
func (c *BridgeBatchDepositEvent) GetMessageL2SyncedHeightInDB(ctx context.Context) (uint64, error) {
|
||||
var message BridgeBatchDepositEvent
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&BridgeBatchDepositEvent{})
|
||||
db = db.Order("l2_block_number desc")
|
||||
|
||||
err := db.First(&message).Error
|
||||
if err != nil && errors.Is(gorm.ErrRecordNotFound, err) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get l2 latest processed height, error: %w", err)
|
||||
}
|
||||
|
||||
return message.L2BlockNumber, nil
|
||||
}
|
||||
|
||||
// InsertOrUpdateL1BridgeBatchDepositEvent inserts or updates a new L1 BridgeBatchDepositEvent
func (c *BridgeBatchDepositEvent) InsertOrUpdateL1BridgeBatchDepositEvent(ctx context.Context, l1BatchDepositEvents []*BridgeBatchDepositEvent) error {
	if len(l1BatchDepositEvents) == 0 {
		return nil
	}

	db := c.db
	db = db.WithContext(ctx)
	db = db.Model(&BridgeBatchDepositEvent{})
	db = db.Clauses(clause.OnConflict{
		Columns:   []clause.Column{{Name: "l1_tx_hash"}, {Name: "l1_log_index"}},
		DoUpdates: clause.AssignmentColumns([]string{"token_amount", "fee", "l1_block_number", "l1_token_address", "tx_status", "block_timestamp"}),
	})
	if err := db.Create(l1BatchDepositEvents).Error; err != nil {
		return fmt.Errorf("failed to insert message, error: %w", err)
	}
	return nil
}
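
Because the conflict target is (l1_tx_hash, l1_log_index), re-fetching an already processed block range simply refreshes the same rows instead of duplicating them. A hypothetical caller sketch follows; the real call site lives in the fetcher's event-update logic and is not part of this diff.

err := db.Transaction(func(tx *gorm.DB) error {
	// res is the L1FilterResult assembled by the fetcher above.
	return orm.NewBridgeBatchDepositEvent(tx).InsertOrUpdateL1BridgeBatchDepositEvent(ctx, res.BridgeBatchDepositEvents)
})
if err != nil {
	log.Error("failed to persist bridge batch deposit events", "err", err)
}
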
|
||||
|
||||
// UpdateBatchEventStatus updates the tx_status of BridgeBatchDepositEvent given batch index
|
||||
func (c *BridgeBatchDepositEvent) UpdateBatchEventStatus(ctx context.Context, distributeMessage *BridgeBatchDepositEvent) error {
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&BridgeBatchDepositEvent{})
|
||||
db = db.Where("batch_index = ?", distributeMessage.BatchIndex)
|
||||
db = db.Where("token_type = ?", distributeMessage.TokenType)
|
||||
updateFields := map[string]interface{}{
|
||||
"l2_token_address": distributeMessage.L2TokenAddress,
|
||||
"l2_block_number": distributeMessage.L2BlockNumber,
|
||||
"l2_tx_hash": distributeMessage.L2TxHash,
|
||||
"tx_status": types.TxStatusBridgeBatchDistribute,
|
||||
}
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to UpdateBatchEventStatus, batchIndex: %d, error: %w", distributeMessage.BatchIndex, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateDistributeFailedStatus updates the tx_status of BridgeBatchDepositEvent given batch index and senders
|
||||
func (c *BridgeBatchDepositEvent) UpdateDistributeFailedStatus(ctx context.Context, batchIndex uint64, senders []string) error {
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&BridgeBatchDepositEvent{})
|
||||
db = db.Where("batch_index = ?", batchIndex)
|
||||
db = db.Where("sender in (?)", senders)
|
||||
updateFields := map[string]interface{}{
|
||||
"tx_status": types.TxStatusBridgeBatchDistributeFailed,
|
||||
}
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to UpdateDistributeFailedStatus, batchIndex: %d, senders:%v, error: %w", batchIndex, senders, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -8,75 +8,15 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// TokenType represents the type of token.
|
||||
type TokenType int
|
||||
"scroll-tech/bridge-history-api/internal/types"
|
||||
|
||||
// Constants for TokenType.
|
||||
const (
|
||||
TokenTypeUnknown TokenType = iota
|
||||
TokenTypeETH
|
||||
TokenTypeERC20
|
||||
TokenTypeERC721
|
||||
TokenTypeERC1155
|
||||
)
|
||||
|
||||
// MessageType represents the type of message.
|
||||
type MessageType int
|
||||
|
||||
// Constants for MessageType.
|
||||
const (
|
||||
MessageTypeUnknown MessageType = iota
|
||||
MessageTypeL1SentMessage
|
||||
MessageTypeL2SentMessage
|
||||
)
|
||||
|
||||
// TxStatusType represents the status of a transaction.
|
||||
type TxStatusType int
|
||||
|
||||
// Constants for TxStatusType.
|
||||
const (
|
||||
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
|
||||
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
|
||||
// from a later status (e.g., relayed) back to "sent".
|
||||
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
|
||||
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
|
||||
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
|
||||
TxStatusTypeSent TxStatusType = iota
|
||||
TxStatusTypeSentTxReverted // Not track message hash, thus will not be processed again anymore.
|
||||
TxStatusTypeRelayed // Terminal status.
|
||||
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
|
||||
TxStatusTypeFailedRelayed
|
||||
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
|
||||
TxStatusTypeRelayTxReverted
|
||||
TxStatusTypeSkipped
|
||||
TxStatusTypeDropped // Terminal status.
|
||||
)
|
||||
|
||||
// RollupStatusType represents the status of a rollup.
|
||||
type RollupStatusType int
|
||||
|
||||
// Constants for RollupStatusType.
|
||||
const (
|
||||
RollupStatusTypeUnknown RollupStatusType = iota
|
||||
RollupStatusTypeFinalized // only batch finalized status is used.
|
||||
)
|
||||
|
||||
// MessageQueueEventType represents the type of message queue event.
|
||||
type MessageQueueEventType int
|
||||
|
||||
// Constants for MessageQueueEventType.
|
||||
const (
|
||||
MessageQueueEventTypeUnknown MessageQueueEventType = iota
|
||||
MessageQueueEventTypeQueueTransaction
|
||||
MessageQueueEventTypeDequeueTransaction
|
||||
MessageQueueEventTypeDropTransaction
|
||||
btypes "scroll-tech/bridge-history-api/internal/types"
|
||||
)
|
||||
|
||||
// MessageQueueEvent struct represents the details of a batch event.
|
||||
type MessageQueueEvent struct {
|
||||
EventType MessageQueueEventType
|
||||
EventType btypes.MessageQueueEventType
|
||||
QueueIndex uint64
|
||||
|
||||
// Track replay tx hash and refund tx hash.
|
||||
@@ -132,15 +72,15 @@ func NewCrossMessage(db *gorm.DB) *CrossMessage {
|
||||
}
|
||||
|
||||
// GetMessageSyncedHeightInDB returns the latest synced cross message height from the database for a given message type.
|
||||
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType MessageType) (uint64, error) {
|
||||
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType btypes.MessageType) (uint64, error) {
|
||||
var message CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", messageType)
|
||||
switch {
|
||||
case messageType == MessageTypeL1SentMessage:
|
||||
case messageType == btypes.MessageTypeL1SentMessage:
|
||||
db = db.Order("l1_block_number desc")
|
||||
case messageType == MessageTypeL2SentMessage:
|
||||
case messageType == btypes.MessageTypeL2SentMessage:
|
||||
db = db.Order("l2_block_number desc")
|
||||
}
|
||||
if err := db.First(&message).Error; err != nil {
|
||||
@@ -150,9 +90,9 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
|
||||
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
|
||||
}
|
||||
switch {
|
||||
case messageType == MessageTypeL1SentMessage:
|
||||
case messageType == btypes.MessageTypeL1SentMessage:
|
||||
return message.L1BlockNumber, nil
|
||||
case messageType == MessageTypeL2SentMessage:
|
||||
case messageType == btypes.MessageTypeL2SentMessage:
|
||||
return message.L2BlockNumber, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid message type: %v", messageType)
|
||||
@@ -164,8 +104,8 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
|
||||
var message CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("rollup_status = ?", RollupStatusTypeFinalized)
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
|
||||
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
|
||||
db = db.Order("message_nonce desc")
|
||||
if err := db.First(&message).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
@@ -183,8 +123,8 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("l2_block_number >= ?", startBlock)
|
||||
db = db.Where("l2_block_number <= ?", endBlock)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeSentTxReverted)
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("tx_status != ?", types.TxStatusTypeSentTxReverted)
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
|
||||
db = db.Order("message_nonce asc")
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
@@ -212,8 +152,8 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
|
||||
var messages []*CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("tx_status = ?", TxStatusTypeSent)
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
|
||||
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
|
||||
db = db.Where("sender = ?", sender)
|
||||
db = db.Order("block_timestamp desc")
|
||||
db = db.Limit(500)
|
||||
@@ -228,7 +168,7 @@ func (c *CrossMessage) GetL2WithdrawalsByAddress(ctx context.Context, sender str
|
||||
var messages []*CrossMessage
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
|
||||
db = db.Where("sender = ?", sender)
|
||||
db = db.Order("block_timestamp desc")
|
||||
db = db.Limit(500)
|
||||
@@ -261,22 +201,22 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
|
||||
db = db.Model(&CrossMessage{})
|
||||
txStatusUpdateFields := make(map[string]interface{})
|
||||
switch l1MessageQueueEvent.EventType {
|
||||
case MessageQueueEventTypeQueueTransaction:
|
||||
case btypes.MessageQueueEventTypeQueueTransaction:
|
||||
continue
|
||||
case MessageQueueEventTypeDequeueTransaction:
|
||||
case btypes.MessageQueueEventTypeDequeueTransaction:
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
|
||||
case MessageQueueEventTypeDropTransaction:
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSkipped
|
||||
case btypes.MessageQueueEventTypeDropTransaction:
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped
|
||||
}
|
||||
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
|
||||
@@ -290,9 +230,9 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
|
||||
db = db.Model(&CrossMessage{})
|
||||
txHashUpdateFields := make(map[string]interface{})
|
||||
switch l1MessageQueueEvent.EventType {
|
||||
case MessageQueueEventTypeDequeueTransaction:
|
||||
case btypes.MessageQueueEventTypeDequeueTransaction:
|
||||
continue
|
||||
case MessageQueueEventTypeQueueTransaction:
|
||||
case btypes.MessageQueueEventTypeQueueTransaction:
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sendMessages have been filtered out.
|
||||
// replayMessage case:
|
||||
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
|
||||
@@ -304,9 +244,9 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
|
||||
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
|
||||
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
|
||||
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
|
||||
case MessageQueueEventTypeDropTransaction:
|
||||
case btypes.MessageQueueEventTypeDropTransaction:
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
|
||||
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
|
||||
}
|
||||
if err := db.Updates(txHashUpdateFields).Error; err != nil {
|
||||
@@ -320,12 +260,12 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
|
||||
func (c *CrossMessage) UpdateBatchStatusOfL2Withdrawals(ctx context.Context, startBlockNumber, endBlockNumber, batchIndex uint64) error {
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
|
||||
db = db.Where("l2_block_number >= ?", startBlockNumber)
|
||||
db = db.Where("l2_block_number <= ?", endBlockNumber)
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["batch_index"] = batchIndex
|
||||
updateFields["rollup_status"] = RollupStatusTypeFinalized
|
||||
updateFields["rollup_status"] = btypes.RollupStatusTypeFinalized
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update batch status of L2 sent messages, start: %v, end: %v, index: %v, error: %w", startBlockNumber, endBlockNumber, batchIndex, err)
|
||||
}
|
||||
@@ -462,7 +402,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
|
||||
mergedL2RelayedMessages := make(map[string]*CrossMessage)
|
||||
for _, message := range l2RelayedMessages {
|
||||
if existing, found := mergedL2RelayedMessages[message.MessageHash]; found {
|
||||
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
|
||||
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
|
||||
mergedL2RelayedMessages[message.MessageHash] = message
|
||||
}
|
||||
} else {
|
||||
@@ -489,8 +429,8 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
|
||||
Exprs: []clause.Expression{
|
||||
clause.And(
|
||||
// do not over-write terminal statuses.
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
|
||||
),
|
||||
},
|
||||
},
|
||||
@@ -520,7 +460,7 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
|
||||
mergedL1RelayedMessages := make(map[string]*CrossMessage)
|
||||
for _, message := range l1RelayedMessages {
|
||||
if existing, found := mergedL1RelayedMessages[message.MessageHash]; found {
|
||||
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
|
||||
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
|
||||
mergedL1RelayedMessages[message.MessageHash] = message
|
||||
}
|
||||
} else {
|
||||
@@ -541,8 +481,8 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
|
||||
Exprs: []clause.Expression{
|
||||
clause.And(
|
||||
// do not over-write terminal statuses.
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
|
||||
),
|
||||
},
|
||||
},
|
||||
|
||||
@@ -0,0 +1,38 @@
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
CREATE TABLE bridge_batch_deposit_event_v2
|
||||
(
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
token_type SMALLINT NOT NULL,
|
||||
sender VARCHAR NOT NULL,
|
||||
batch_index BIGINT DEFAULT NULL,
|
||||
token_amount VARCHAR NOT NULL,
|
||||
fee VARCHAR NOT NULL,
|
||||
l1_token_address VARCHAR DEFAULT NULL,
|
||||
l2_token_address VARCHAR DEFAULT NULL,
|
||||
l1_block_number BIGINT DEFAULT NULL,
|
||||
l2_block_number BIGINT DEFAULT NULL,
|
||||
l1_tx_hash VARCHAR DEFAULT NULL,
|
||||
l1_log_index INTEGER DEFAULT NULL,
|
||||
l2_tx_hash VARCHAR DEFAULT NULL,
|
||||
tx_status SMALLINT NOT NULL,
|
||||
block_timestamp BIGINT NOT NULL,
|
||||
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
deleted_at TIMESTAMP(0) DEFAULT NULL
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX idx_l1hash_l1logindex ON bridge_batch_deposit_event_v2 (l1_tx_hash, l1_log_index);
|
||||
CREATE INDEX IF NOT EXISTS idx_bbde_batchidx_sender ON bridge_batch_deposit_event_v2 (batch_index, sender);
|
||||
CREATE INDEX IF NOT EXISTS idx_bbde_l1_block_number ON bridge_batch_deposit_event_v2 (l1_block_number DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_bbde_l2_block_number ON bridge_batch_deposit_event_v2 (l2_block_number DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_bbde_l1_tx_hash ON bridge_batch_deposit_event_v2 (l1_tx_hash DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_bbde_l2_tx_hash ON bridge_batch_deposit_event_v2 (l2_tx_hash DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_bbde_sender_block_timestamp ON bridge_batch_deposit_event_v2 (sender, block_timestamp DESC);
|
||||
|
||||
-- +goose StatementEnd
|
||||
|
||||
-- +goose Down
|
||||
-- +goose StatementBegin
|
||||
DROP TABLE IF EXISTS bridge_batch_deposit_event_v2;
|
||||
-- +goose StatementEnd
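
The ORM upsert earlier in this change depends on idx_l1hash_l1logindex existing before events are ingested. A hedged sketch of applying the migration above with pressly/goose follows; the DSN and migrations directory are hypothetical, and the project may wire its migration runner differently.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
	"github.com/pressly/goose/v3"
)

func main() {
	// Hypothetical connection string; replace with the service's real configuration.
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/bridge?sslmode=disable")
	if err != nil {
		log.Fatalf("open db: %v", err)
	}
	if err := goose.SetDialect("postgres"); err != nil {
		log.Fatalf("set dialect: %v", err)
	}
	// Applies pending migrations, including bridge_batch_deposit_event_v2 and its
	// unique index on (l1_tx_hash, l1_log_index) that the ORM upsert relies on.
	if err := goose.Up(db, "./migrate/migrations"); err != nil {
		log.Fatalf("goose up: %v", err)
	}
}
```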
|
||||
@@ -27,9 +27,9 @@ func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
|
||||
|
||||
r := router.Group("api/")
|
||||
|
||||
r.GET("/txs", api.HistoryCtrler.GetTxsByAddress)
|
||||
r.GET("/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
|
||||
r.GET("/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
|
||||
r.GET("/txs", api.TxsByAddressCtl.GetTxsByAddress)
|
||||
r.GET("/l2/withdrawals", api.L2WithdrawalsByAddressCtl.GetL2WithdrawalsByAddress)
|
||||
r.GET("/l2/unclaimed/withdrawals", api.L2UnclaimedWithdrawalsByAddressCtl.GetL2UnclaimedWithdrawalsByAddress)
|
||||
|
||||
r.POST("/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
|
||||
r.POST("/txsbyhashes", api.TxsByHashesCtl.PostQueryTxsByHashes)
|
||||
}
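
For reference, a hedged client sketch against the regrouped routes above; the query and body parameter names (address, page, page_size, txs) are assumptions, since the handlers themselves are not shown in this diff.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "http://localhost:8080" // hypothetical service address

	// Paginated deposit/withdraw history for one sender.
	resp, err := http.Get(base + "/api/txs?address=0x0000000000000000000000000000000000000001&page=1&page_size=10")
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body))

	// Batch lookup by transaction hashes.
	payload, _ := json.Marshal(map[string][]string{
		"txs": {"0xabc0000000000000000000000000000000000000000000000000000000000001"},
	})
	resp, err = http.Post(base+"/api/txsbyhashes", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	body, _ = io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body))
}
```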
|
||||
|
||||
93
bridge-history-api/internal/types/events.go
Normal file
93
bridge-history-api/internal/types/events.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package types
|
||||
|
||||
// TxStatusType represents the status of a transaction.
|
||||
type TxStatusType int
|
||||
|
||||
// Constants for TxStatusType.
|
||||
const (
|
||||
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
|
||||
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
|
||||
// from a later status (e.g., relayed) back to "sent".
|
||||
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
|
||||
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
|
||||
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
|
||||
TxStatusTypeSent TxStatusType = iota
|
||||
TxStatusTypeSentTxReverted // Message hash is not tracked, thus it will not be processed again.
|
||||
TxStatusTypeRelayed // Terminal status.
|
||||
// TxStatusTypeFailedRelayed Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
|
||||
TxStatusTypeFailedRelayed
|
||||
// TxStatusTypeRelayTxReverted Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
|
||||
TxStatusTypeRelayTxReverted
|
||||
TxStatusTypeSkipped
|
||||
TxStatusTypeDropped // Terminal status.
|
||||
|
||||
// TxStatusBridgeBatchDeposit means the user has deposited tokens to the bridge batch deposit contract
|
||||
TxStatusBridgeBatchDeposit
|
||||
// TxStatusBridgeBatchDistribute means the bridge batch deposit contract has successfully distributed tokens to the user
|
||||
TxStatusBridgeBatchDistribute
|
||||
// TxStatusBridgeBatchDistributeFailed means the bridge batch deposit contract failed to distribute tokens to the user
|
||||
TxStatusBridgeBatchDistributeFailed
|
||||
)
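
The comments above encode a simple rule: relayed and dropped are terminal, so a slower fetcher must never move a message back to an earlier status. A small illustrative helper (not part of the package) makes the rule explicit; the import path is the one used elsewhere in this change.

```go
package main

import (
	"fmt"

	btypes "scroll-tech/bridge-history-api/internal/types"
)

// canOverwrite reports whether an incoming status may replace the stored one, per the
// "do not over-write terminal statuses" rule enforced in the ORM update paths.
func canOverwrite(current btypes.TxStatusType) bool {
	return current != btypes.TxStatusTypeRelayed && current != btypes.TxStatusTypeDropped
}

func main() {
	fmt.Println(canOverwrite(btypes.TxStatusTypeSent))    // true: still mutable
	fmt.Println(canOverwrite(btypes.TxStatusTypeRelayed)) // false: terminal
	fmt.Println(canOverwrite(btypes.TxStatusTypeDropped)) // false: terminal
}
```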
|
||||
|
||||
// TokenType represents the type of token.
|
||||
type TokenType int
|
||||
|
||||
// Constants for TokenType.
|
||||
const (
|
||||
TokenTypeUnknown TokenType = iota
|
||||
TokenTypeETH
|
||||
TokenTypeERC20
|
||||
TokenTypeERC721
|
||||
TokenTypeERC1155
|
||||
)
|
||||
|
||||
// MessageType represents the type of message.
|
||||
type MessageType int
|
||||
|
||||
// Constants for MessageType.
|
||||
const (
|
||||
MessageTypeUnknown MessageType = iota
|
||||
MessageTypeL1SentMessage
|
||||
MessageTypeL2SentMessage
|
||||
MessageTypeL1BatchDeposit
|
||||
)
|
||||
|
||||
// RollupStatusType represents the status of a rollup.
|
||||
type RollupStatusType int
|
||||
|
||||
// Constants for RollupStatusType.
|
||||
const (
|
||||
RollupStatusTypeUnknown RollupStatusType = iota
|
||||
RollupStatusTypeFinalized // only batch finalized status is used.
|
||||
)
|
||||
|
||||
// MessageQueueEventType represents the type of message queue event.
|
||||
type MessageQueueEventType int
|
||||
|
||||
// Constants for MessageQueueEventType.
|
||||
const (
|
||||
MessageQueueEventTypeUnknown MessageQueueEventType = iota
|
||||
MessageQueueEventTypeQueueTransaction
|
||||
MessageQueueEventTypeDequeueTransaction
|
||||
MessageQueueEventTypeDropTransaction
|
||||
)
|
||||
|
||||
// BatchStatusType represents the type of batch status.
|
||||
type BatchStatusType int
|
||||
|
||||
// Constants for BatchStatusType.
|
||||
const (
|
||||
BatchStatusTypeUnknown BatchStatusType = iota
|
||||
BatchStatusTypeCommitted
|
||||
BatchStatusTypeReverted
|
||||
BatchStatusTypeFinalized
|
||||
)
|
||||
|
||||
// UpdateStatusType represents whether the batch info has been updated in the message table.
|
||||
type UpdateStatusType int
|
||||
|
||||
// Constants for UpdateStatusType.
|
||||
const (
|
||||
UpdateStatusTypeUnupdated UpdateStatusType = iota
|
||||
UpdateStatusTypeUpdated
|
||||
)
|
||||
@@ -4,8 +4,6 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -79,17 +77,18 @@ type TxHistoryInfo struct {
|
||||
ReplayTxHash string `json:"replay_tx_hash"`
|
||||
RefundTxHash string `json:"refund_tx_hash"`
|
||||
MessageHash string `json:"message_hash"`
|
||||
TokenType orm.TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
|
||||
TokenType TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
|
||||
TokenIDs []string `json:"token_ids"` // only for erc721 and erc1155
|
||||
TokenAmounts []string `json:"token_amounts"` // for eth and erc20, the length is 1, for erc721 and erc1155, the length could be > 1
|
||||
MessageType orm.MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
|
||||
MessageType MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
|
||||
L1TokenAddress string `json:"l1_token_address"`
|
||||
L2TokenAddress string `json:"l2_token_address"`
|
||||
BlockNumber uint64 `json:"block_number"`
|
||||
TxStatus orm.TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
|
||||
TxStatus TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
|
||||
CounterpartChainTx *CounterpartChainTx `json:"counterpart_chain_tx"`
|
||||
ClaimInfo *ClaimInfo `json:"claim_info"`
|
||||
BlockTimestamp uint64 `json:"block_timestamp"`
|
||||
BatchDepositFee string `json:"batch_deposit_fee"` // only for bridge batch deposit
|
||||
}
|
||||
|
||||
// RenderJSON renders response with json
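
Since the enum fields now live in the same types package as TxHistoryInfo, they serialize as plain integers in the JSON response, matching the numeric codes noted in the field comments. A hedged, illustrative sketch follows; it is not part of the repository, uses made-up values, and only sets fields visible in the hunk above.

```go
package types

import (
	"encoding/json"
	"fmt"
)

// sketchTxHistoryJSON shows the wire shape of a (partial) TxHistoryInfo entry.
func sketchTxHistoryJSON() {
	info := TxHistoryInfo{
		MessageHash:  "0xabc0000000000000000000000000000000000000000000000000000000000001",
		TokenType:    TokenTypeERC20,           // serialized as 2
		TokenAmounts: []string{"1000000000000000000"},
		MessageType:  MessageTypeL1SentMessage, // serialized as 1
		TxStatus:     TxStatusTypeRelayed,      // serialized as 2
		BlockNumber:  1234567,
	}
	out, _ := json.Marshal(&info)
	fmt.Println(string(out))
}
```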
|
||||
@@ -104,10 +104,12 @@ linters-settings:
|
||||
# minimal occurrences count to trigger, 3 by default
|
||||
min-occurrences: 3
|
||||
depguard:
|
||||
list-type: blacklist
|
||||
include-go-root: false
|
||||
packages:
|
||||
- github.com/davecgh/go-spew/spew
|
||||
rules:
|
||||
main:
|
||||
files:
|
||||
- $all
|
||||
deny:
|
||||
- pkg: "github.com/davecgh/go-spew/spew"
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
FROM golang:1.21-alpine3.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
FROM golang:1.21-alpine3.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY ./bridge-history-api/go.* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
FROM golang:1.21-alpine3.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Build libzkp dependency
|
||||
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
|
||||
WORKDIR app
|
||||
|
||||
FROM chef as planner
|
||||
@@ -17,7 +17,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
|
||||
|
||||
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
ifeq ($(GO_VERSION),)
|
||||
GO_VERSION=1.20
|
||||
GO_VERSION=1.21
|
||||
endif
|
||||
ifeq ($(RUST_VERSION),)
|
||||
RUST_VERSION=nightly-2022-12-10
|
||||
RUST_VERSION=nightly-2023-12-03
|
||||
endif
|
||||
ifeq ($(PYTHON_VERSION),)
|
||||
PYTHON_VERSION=3.10
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
ARG CUDA_VERSION=11.7.1
|
||||
ARG GO_VERSION=1.20
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG GO_VERSION=1.21
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG GO_VERSION=1.20
|
||||
ARG GO_VERSION=1.21
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
ARG GO_VERSION=1.20
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG GO_VERSION=1.21
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
ARG GO_VERSION=1.20
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG GO_VERSION=1.21
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM ubuntu:20.04
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
ARG ALPINE_VERSION=3.15
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM alpine:${ALPINE_VERSION}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM ubuntu:20.04
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
# Start from the latest golang base image
|
||||
FROM golang:1.21
|
||||
|
||||
# Install Docker
|
||||
RUN apt-get update && apt-get install -y docker.io docker-compose
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /go/src/app
|
||||
|
||||
# This container will be executable
|
||||
ENTRYPOINT [ "/bin/bash" ]
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
|
||||
const (
|
||||
// GolangCIVersion to be used for linting.
|
||||
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2"
|
||||
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2"
|
||||
)
|
||||
|
||||
// GOBIN environment variable.
|
||||
@@ -51,7 +51,7 @@ func lint() {
|
||||
}
|
||||
|
||||
cmd = exec.Command(filepath.Join(goBin(), "golangci-lint"))
|
||||
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml")
|
||||
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml", "--timeout", "10m")
|
||||
|
||||
if *v {
|
||||
cmd.Args = append(cmd.Args, "-v")
|
||||
|
||||
@@ -1,127 +0,0 @@
|
||||
package dockercompose
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/log"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
tc "github.com/testcontainers/testcontainers-go/modules/compose"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
)
|
||||
|
||||
// PoSL1TestEnv represents the config needed to test in PoS Layer 1.
|
||||
type PoSL1TestEnv struct {
|
||||
dockerComposeFile string
|
||||
compose tc.ComposeStack
|
||||
gethHTTPPort int
|
||||
hostPath string
|
||||
}
|
||||
|
||||
// NewPoSL1TestEnv creates and initializes a new instance of PoSL1TestEnv with a random HTTP port.
|
||||
func NewPoSL1TestEnv() (*PoSL1TestEnv, error) {
|
||||
rootDir, err := findProjectRootDir()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find project root directory: %v", err)
|
||||
}
|
||||
|
||||
hostPath, found := os.LookupEnv("HOST_PATH")
|
||||
if !found {
|
||||
hostPath = ""
|
||||
}
|
||||
|
||||
rnd, err := rand.Int(rand.Reader, big.NewInt(65536-1024))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate a random: %v", err)
|
||||
}
|
||||
gethHTTPPort := int(rnd.Int64()) + 1024
|
||||
|
||||
if err := os.Setenv("GETH_HTTP_PORT", fmt.Sprintf("%d", gethHTTPPort)); err != nil {
|
||||
return nil, fmt.Errorf("failed to set GETH_HTTP_PORT: %v", err)
|
||||
}
|
||||
|
||||
return &PoSL1TestEnv{
|
||||
dockerComposeFile: filepath.Join(rootDir, "common", "docker-compose", "l1", "docker-compose.yml"),
|
||||
gethHTTPPort: gethHTTPPort,
|
||||
hostPath: hostPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start starts the PoS L1 test environment by running the associated Docker Compose configuration.
|
||||
func (e *PoSL1TestEnv) Start() error {
|
||||
var err error
|
||||
e.compose, err = tc.NewDockerCompose([]string{e.dockerComposeFile}...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create docker compose: %w", err)
|
||||
}
|
||||
|
||||
env := map[string]string{
|
||||
"GETH_HTTP_PORT": fmt.Sprintf("%d", e.gethHTTPPort),
|
||||
}
|
||||
|
||||
if e.hostPath != "" {
|
||||
env["HOST_PATH"] = e.hostPath
|
||||
}
|
||||
|
||||
if err = e.compose.WaitForService("geth", wait.NewHTTPStrategy("/").WithPort("8545/tcp").WithStartupTimeout(15*time.Second)).WithEnv(env).Up(context.Background()); err != nil {
|
||||
if errStop := e.Stop(); errStop != nil {
|
||||
log.Error("failed to stop PoS L1 test environment", "err", errStop)
|
||||
}
|
||||
return fmt.Errorf("failed to start PoS L1 test environment: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the PoS L1 test environment by stopping and removing the associated Docker Compose services.
|
||||
func (e *PoSL1TestEnv) Stop() error {
|
||||
if e.compose != nil {
|
||||
if err := e.compose.Down(context.Background(), tc.RemoveOrphans(true), tc.RemoveVolumes(true), tc.RemoveImagesLocal); err != nil {
|
||||
return fmt.Errorf("failed to stop PoS L1 test environment: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Endpoint returns the HTTP endpoint for the PoS L1 test environment.
|
||||
func (e *PoSL1TestEnv) Endpoint() string {
|
||||
return fmt.Sprintf("http://127.0.0.1:%d", e.gethHTTPPort)
|
||||
}
|
||||
|
||||
// L1Client returns an ethclient by dialing the running PoS L1 test environment
|
||||
func (e *PoSL1TestEnv) L1Client() (*ethclient.Client, error) {
|
||||
if e == nil {
|
||||
return nil, fmt.Errorf("PoS L1 test environment is not initialized")
|
||||
}
|
||||
|
||||
client, err := ethclient.Dial(e.Endpoint())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to dial PoS L1 test environment: %w", err)
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func findProjectRootDir() (string, error) {
|
||||
currentDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get working directory: %w", err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
|
||||
if err == nil {
|
||||
return currentDir, nil
|
||||
}
|
||||
|
||||
parentDir := filepath.Dir(currentDir)
|
||||
if parentDir == currentDir {
|
||||
return "", fmt.Errorf("go.work file not found in any parent directory")
|
||||
}
|
||||
|
||||
currentDir = parentDir
|
||||
}
|
||||
}
|
||||
@@ -5,7 +5,6 @@ go 1.21
|
||||
require (
|
||||
github.com/Masterminds/semver/v3 v3.2.1
|
||||
github.com/bits-and-blooms/bitset v1.12.0
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004
|
||||
github.com/docker/docker v25.0.3+incompatible
|
||||
github.com/gin-contrib/pprof v1.4.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
@@ -14,7 +13,7 @@ require (
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/prometheus/client_golang v1.16.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/testcontainers/testcontainers-go v0.28.0
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
|
||||
@@ -62,7 +61,7 @@ require (
|
||||
github.com/containerd/typeurl/v2 v2.1.1 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
@@ -183,7 +182,7 @@ require (
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.7.1 // indirect
|
||||
github.com/scroll-tech/zktrie v0.8.2 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
|
||||
@@ -148,8 +148,8 @@ github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoY
|
||||
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
@@ -607,10 +607,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
|
||||
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
|
||||
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
|
||||
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
|
||||
|
||||
@@ -8,14 +8,14 @@ services:
|
||||
mkdir -p /data/execution &&
|
||||
cp -a /execution/* /data/execution/"
|
||||
volumes:
|
||||
- ${HOST_PATH:-../../..}/common/docker-compose/l1/consensus:/consensus
|
||||
- ${HOST_PATH:-../../..}/common/docker-compose/l1/execution:/execution
|
||||
- ../../common/testcontainers/consensus:/consensus
|
||||
- ../../common/testcontainers/execution:/execution
|
||||
- data:/data
|
||||
|
||||
# Creates a genesis state for the beacon chain using a YAML configuration file and
|
||||
# a deterministic set of 64 validators.
|
||||
create-beacon-chain-genesis:
|
||||
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest"
|
||||
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:HEAD-263557"
|
||||
command:
|
||||
- testnet
|
||||
- generate-genesis
|
||||
@@ -96,7 +96,7 @@ services:
|
||||
- --nodiscover
|
||||
- --syncmode=full
|
||||
ports:
|
||||
- ${GETH_HTTP_PORT:-8545}:8545
|
||||
- 8545
|
||||
depends_on:
|
||||
geth-genesis:
|
||||
condition: service_completed_successfully
|
||||
@@ -4,10 +4,13 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
"github.com/testcontainers/testcontainers-go/modules/compose"
|
||||
"github.com/testcontainers/testcontainers-go/modules/postgres"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
"gorm.io/gorm"
|
||||
@@ -18,8 +21,8 @@ import (
|
||||
// TestcontainerApps testcontainers struct
|
||||
type TestcontainerApps struct {
|
||||
postgresContainer *postgres.PostgresContainer
|
||||
l1GethContainer *testcontainers.DockerContainer
|
||||
l2GethContainer *testcontainers.DockerContainer
|
||||
poSL1Container compose.ComposeStack
|
||||
|
||||
// common time stamp in nanoseconds.
|
||||
Timestamp int
|
||||
@@ -28,6 +31,11 @@ type TestcontainerApps struct {
|
||||
// NewTestcontainerApps returns new instance of TestcontainerApps struct
|
||||
func NewTestcontainerApps() *TestcontainerApps {
|
||||
timestamp := time.Now().Nanosecond()
|
||||
// In order to solve the problem of "creating reaper failed: failed to create container"
|
||||
// refer to https://github.com/testcontainers/testcontainers-go/issues/2172
|
||||
if err := os.Setenv("TESTCONTAINERS_RYUK_DISABLED", "true"); err != nil {
|
||||
panic("set env failed: " + err.Error())
|
||||
}
|
||||
return &TestcontainerApps{
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
@@ -53,30 +61,6 @@ func (t *TestcontainerApps) StartPostgresContainer() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartL1GethContainer starts a L1Geth container
|
||||
func (t *TestcontainerApps) StartL1GethContainer() error {
|
||||
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
|
||||
return nil
|
||||
}
|
||||
req := testcontainers.ContainerRequest{
|
||||
Image: "scroll_l1geth",
|
||||
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
|
||||
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
|
||||
Cmd: []string{"--log.debug", "ANY"},
|
||||
}
|
||||
genericContainerReq := testcontainers.GenericContainerRequest{
|
||||
ContainerRequest: req,
|
||||
Started: true,
|
||||
}
|
||||
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
|
||||
if err != nil {
|
||||
log.Printf("failed to start scroll_l1geth container: %s", err)
|
||||
return err
|
||||
}
|
||||
t.l1GethContainer, _ = container.(*testcontainers.DockerContainer)
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartL2GethContainer starts a L2Geth container
|
||||
func (t *TestcontainerApps) StartL2GethContainer() error {
|
||||
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
|
||||
@@ -85,7 +69,10 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
|
||||
req := testcontainers.ContainerRequest{
|
||||
Image: "scroll_l2geth",
|
||||
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
|
||||
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
|
||||
WaitingFor: wait.ForAll(
|
||||
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
|
||||
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
|
||||
),
|
||||
}
|
||||
genericContainerReq := testcontainers.GenericContainerRequest{
|
||||
ContainerRequest: req,
|
||||
@@ -100,6 +87,55 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartPoSL1Container starts the PoS L1 container by running the associated Docker Compose configuration
|
||||
func (t *TestcontainerApps) StartPoSL1Container() error {
|
||||
var (
|
||||
err error
|
||||
rootDir string
|
||||
dockerComposeFile string
|
||||
)
|
||||
|
||||
if rootDir, err = findProjectRootDir(); err != nil {
|
||||
return fmt.Errorf("failed to find project root directory: %v", err)
|
||||
}
|
||||
|
||||
dockerComposeFile = filepath.Join(rootDir, "common", "testcontainers", "docker-compose.yml")
|
||||
|
||||
if t.poSL1Container, err = compose.NewDockerCompose([]string{dockerComposeFile}...); err != nil {
|
||||
return err
|
||||
}
|
||||
err = t.poSL1Container.WaitForService("geth", wait.NewHTTPStrategy("/").
|
||||
WithPort("8545/tcp").
|
||||
WithStartupTimeout(15*time.Second)).
|
||||
Up(context.Background())
|
||||
if err != nil {
|
||||
t.poSL1Container = nil
|
||||
return fmt.Errorf("failed to start PoS L1 container: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetPoSL1EndPoint returns the HTTP endpoint of the running PoS L1 geth service
|
||||
func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
|
||||
if t.poSL1Container == nil {
|
||||
return "", fmt.Errorf("PoS L1 container is not running")
|
||||
}
|
||||
container, err := t.poSL1Container.ServiceContainer(context.Background(), "geth")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return container.PortEndpoint(context.Background(), "8545/tcp", "http")
|
||||
}
|
||||
|
||||
// GetPoSL1Client returns an ethclient by dialing the running PoS L1 geth service
|
||||
func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
|
||||
endpoint, err := t.GetPoSL1EndPoint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ethclient.Dial(endpoint)
|
||||
}
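
A hedged sketch of how a test might use the new PoS L1 helpers, mirroring the pattern in the testcontainers test further below; the import path of the testcontainers package is assumed and may differ in the repository.

```go
package tests

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/testcontainers" // assumed import path for this package
)

func TestPoSL1(t *testing.T) {
	testApps := testcontainers.NewTestcontainerApps()
	defer testApps.Free()

	// Brings up the docker-compose stack and waits for geth's HTTP endpoint.
	assert.NoError(t, testApps.StartPoSL1Container())

	// The compose stack exposes geth on an ephemeral host port; the helper resolves it.
	client, err := testApps.GetPoSL1Client()
	assert.NoError(t, err)

	blockNumber, err := client.BlockNumber(context.Background())
	assert.NoError(t, err)
	t.Logf("PoS L1 at block %d", blockNumber)
}
```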
|
||||
|
||||
// GetDBEndPoint returns the endpoint of the running postgres container
|
||||
func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
|
||||
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
|
||||
@@ -108,18 +144,6 @@ func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
|
||||
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
|
||||
}
|
||||
|
||||
// GetL1GethEndPoint returns the endpoint of the running L1Geth container
|
||||
func (t *TestcontainerApps) GetL1GethEndPoint() (string, error) {
|
||||
if t.l1GethContainer == nil || !t.l1GethContainer.IsRunning() {
|
||||
return "", fmt.Errorf("l1 geth is not running")
|
||||
}
|
||||
endpoint, err := t.l1GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// GetL2GethEndPoint returns the endpoint of the running L2Geth container
|
||||
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
|
||||
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
|
||||
@@ -147,19 +171,6 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
|
||||
return database.InitDB(dbCfg)
|
||||
}
|
||||
|
||||
// GetL1GethClient returns a ethclient by dialing running L1Geth
|
||||
func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
|
||||
endpoint, err := t.GetL1GethEndPoint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := ethclient.Dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// GetL2GethClient returns a ethclient by dialing running L2Geth
|
||||
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
|
||||
endpoint, err := t.GetL2GethEndPoint()
|
||||
@@ -181,14 +192,38 @@ func (t *TestcontainerApps) Free() {
|
||||
log.Printf("failed to stop postgres container: %s", err)
|
||||
}
|
||||
}
|
||||
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
|
||||
if err := t.l1GethContainer.Terminate(ctx); err != nil {
|
||||
log.Printf("failed to stop scroll_l1geth container: %s", err)
|
||||
}
|
||||
}
|
||||
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
|
||||
if err := t.l2GethContainer.Terminate(ctx); err != nil {
|
||||
log.Printf("failed to stop scroll_l2geth container: %s", err)
|
||||
}
|
||||
}
|
||||
if t.poSL1Container != nil {
|
||||
if err := t.poSL1Container.Down(context.Background(), compose.RemoveOrphans(true), compose.RemoveVolumes(true), compose.RemoveImagesLocal); err != nil {
|
||||
log.Printf("failed to stop PoS L1 container: %s", err)
|
||||
} else {
|
||||
t.poSL1Container = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// findProjectRootDir find project root directory
|
||||
func findProjectRootDir() (string, error) {
|
||||
currentDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get working directory: %w", err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
|
||||
if err == nil {
|
||||
return currentDir, nil
|
||||
}
|
||||
|
||||
parentDir := filepath.Dir(currentDir)
|
||||
if parentDir == currentDir {
|
||||
return "", fmt.Errorf("go.work file not found in any parent directory")
|
||||
}
|
||||
|
||||
currentDir = parentDir
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,8 +17,9 @@ func TestNewTestcontainerApps(t *testing.T) {
|
||||
ethclient *ethclient.Client
|
||||
)
|
||||
|
||||
// test start testcontainers
|
||||
testApps := NewTestcontainerApps()
|
||||
|
||||
// test start testcontainers
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
endpoint, err = testApps.GetDBEndPoint()
|
||||
assert.NoError(t, err)
|
||||
@@ -27,14 +28,6 @@ func TestNewTestcontainerApps(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, gormDBclient)
|
||||
|
||||
assert.NoError(t, testApps.StartL1GethContainer())
|
||||
endpoint, err = testApps.GetL1GethEndPoint()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, endpoint)
|
||||
ethclient, err = testApps.GetL1GethClient()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ethclient)
|
||||
|
||||
assert.NoError(t, testApps.StartL2GethContainer())
|
||||
endpoint, err = testApps.GetL2GethEndPoint()
|
||||
assert.NoError(t, err)
|
||||
@@ -43,17 +36,25 @@ func TestNewTestcontainerApps(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ethclient)
|
||||
|
||||
assert.NoError(t, testApps.StartPoSL1Container())
|
||||
endpoint, err = testApps.GetPoSL1EndPoint()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, endpoint)
|
||||
ethclient, err = testApps.GetPoSL1Client()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ethclient)
|
||||
|
||||
// test free testcontainers
|
||||
testApps.Free()
|
||||
endpoint, err = testApps.GetDBEndPoint()
|
||||
assert.EqualError(t, err, "postgres is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
|
||||
endpoint, err = testApps.GetL1GethEndPoint()
|
||||
assert.EqualError(t, err, "l1 geth is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
|
||||
endpoint, err = testApps.GetL2GethEndPoint()
|
||||
assert.EqualError(t, err, "l2 geth is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
|
||||
endpoint, err = testApps.GetPoSL1EndPoint()
|
||||
assert.EqualError(t, err, "PoS L1 container is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
}
|
||||
|
||||
@@ -1,133 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const targetTxSize = 126914
|
||||
|
||||
func main() {
|
||||
privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
|
||||
privateKey, err := crypto.HexToECDSA(privateKeyHex)
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid private key: %v", err)
|
||||
}
|
||||
|
||||
client, err := ethclient.Dial("http://localhost:9999")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect to the Ethereum client: %v", err)
|
||||
}
|
||||
|
||||
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
|
||||
}
|
||||
|
||||
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to retrieve account nonce: %v", err)
|
||||
}
|
||||
|
||||
totalTxNum := []uint64{2, 3, 4, 5, 6}
|
||||
for _, num := range totalTxNum {
|
||||
prepareAndSendTransactions(client, auth, nonce, num)
|
||||
nonce += num
|
||||
}
|
||||
}
|
||||
|
||||
func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
|
||||
gasLimit := uint64(5000000)
|
||||
gasPrice := big.NewInt(1000000000)
|
||||
|
||||
var signedTxs []*types.Transaction
|
||||
payloadSum := 0
|
||||
|
||||
dataPayload := make([]byte, targetTxSize/totalTxNum)
|
||||
for i := range dataPayload {
|
||||
dataPayload[i] = 0xff
|
||||
}
|
||||
|
||||
for i := uint64(0); i < totalTxNum-1; i++ {
|
||||
txData := &types.LegacyTx{
|
||||
Nonce: initialNonce + i,
|
||||
GasPrice: gasPrice,
|
||||
Gas: gasLimit,
|
||||
To: &auth.From,
|
||||
Data: dataPayload,
|
||||
}
|
||||
|
||||
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to sign tx: %v", err)
|
||||
}
|
||||
|
||||
rlpTxData, err := rlp.EncodeToBytes(signedTx)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to RLP encode the tx: %v", err)
|
||||
}
|
||||
|
||||
payloadSum += len(rlpTxData)
|
||||
signedTxs = append(signedTxs, signedTx)
|
||||
}
|
||||
|
||||
fmt.Println("payload sum", payloadSum)
|
||||
|
||||
lowerBound := 0
|
||||
upperBound := targetTxSize
|
||||
for lowerBound <= upperBound {
|
||||
mid := (lowerBound + upperBound) / 2
|
||||
data := make([]byte, mid)
|
||||
for i := range data {
|
||||
data[i] = 0xff
|
||||
}
|
||||
|
||||
txData := &types.LegacyTx{
|
||||
Nonce: initialNonce + totalTxNum - 1,
|
||||
GasPrice: gasPrice,
|
||||
Gas: gasLimit,
|
||||
To: &auth.From,
|
||||
Data: data,
|
||||
}
|
||||
|
||||
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to sign tx: %v", err)
|
||||
}
|
||||
|
||||
rlpTxData, err := rlp.EncodeToBytes(signedTx)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to RLP encode the tx: %v", err)
|
||||
}
|
||||
txSize := len(rlpTxData)
|
||||
|
||||
if payloadSum+txSize < targetTxSize {
|
||||
lowerBound = mid + 1
|
||||
} else if payloadSum+txSize > targetTxSize {
|
||||
upperBound = mid - 1
|
||||
} else {
|
||||
fmt.Println("payloadSum+txSize", payloadSum+txSize)
|
||||
signedTxs = append(signedTxs, signedTx)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, signedTx := range signedTxs {
|
||||
if err := client.SendTransaction(context.Background(), signedTx); err != nil {
|
||||
return fmt.Errorf("failed to send transaction: %v", err)
|
||||
}
|
||||
fmt.Printf("Transaction with nonce %d sent\n", signedTx.Nonce())
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const targetTxSize = 120568
|
||||
|
||||
func main() {
|
||||
privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
|
||||
privateKey, err := crypto.HexToECDSA(privateKeyHex)
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid private key: %v", err)
|
||||
}
|
||||
|
||||
client, err := ethclient.Dial("http://localhost:9999")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect to the Ethereum client: %v", err)
|
||||
}
|
||||
|
||||
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
|
||||
}
|
||||
|
||||
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to retrieve account nonce: %v", err)
|
||||
}
|
||||
prepareAndSendTransactions(client, auth, nonce, 1)
|
||||
prepareAndSendTransactions(client, auth, nonce+1, 2)
|
||||
prepareAndSendTransactions(client, auth, nonce+1+2, 3)
|
||||
prepareAndSendTransactions(client, auth, nonce+1+2+3, 4)
|
||||
prepareAndSendTransactions(client, auth, nonce+1+2+3+4, 5)
|
||||
prepareAndSendTransactions(client, auth, nonce+1+2+3+4+5, 6)
|
||||
}
|
||||
|
||||
func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
|
||||
gasLimit := uint64(5000000)
|
||||
gasPrice := big.NewInt(1000000000)
|
||||
|
||||
var signedTxs []*types.Transaction
|
||||
payloadSum := 0
|
||||
|
||||
dataPayload := make([]byte, targetTxSize/totalTxNum)
|
||||
for i := range dataPayload {
|
||||
dataPayload[i] = 0xff
|
||||
}
|
||||
|
||||
for i := uint64(0); i < totalTxNum-1; i++ {
|
||||
txData := &types.LegacyTx{
|
||||
Nonce: initialNonce + i,
|
||||
GasPrice: gasPrice,
|
||||
Gas: gasLimit,
|
||||
To: &auth.From,
|
||||
Data: dataPayload,
|
||||
}
|
||||
|
||||
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to sign tx: %v", err)
|
||||
}
|
||||
|
||||
rlpTxData, err := rlp.EncodeToBytes(signedTx)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to RLP encode the tx: %v", err)
|
||||
}
|
||||
|
||||
payloadSum += len(rlpTxData)
|
||||
signedTxs = append(signedTxs, signedTx)
|
||||
}
|
||||
|
||||
fmt.Println("payload sum", payloadSum)
|
||||
|
||||
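// Binary-search the payload size of the final tx so that the total RLP-encoded size of all txs hits targetTxSize exactly.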
lowerBound := 0
|
||||
upperBound := targetTxSize
|
||||
for lowerBound <= upperBound {
|
||||
mid := (lowerBound + upperBound) / 2
|
||||
data := make([]byte, mid)
|
||||
for i := range data {
|
||||
data[i] = 0xff
|
||||
}
|
||||
|
||||
txData := &types.LegacyTx{
|
||||
Nonce: initialNonce + totalTxNum - 1,
|
||||
GasPrice: gasPrice,
|
||||
Gas: gasLimit,
|
||||
To: &auth.From,
|
||||
Data: data,
|
||||
}
|
||||
|
||||
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to sign tx: %v", err)
|
||||
}
|
||||
|
||||
rlpTxData, err := rlp.EncodeToBytes(signedTx)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to RLP encode the tx: %v", err)
|
||||
}
|
||||
txSize := len(rlpTxData)
|
||||
|
||||
if payloadSum+txSize < targetTxSize {
|
||||
lowerBound = mid + 1
|
||||
} else if payloadSum+txSize > targetTxSize {
|
||||
upperBound = mid - 1
|
||||
} else {
|
||||
fmt.Println("payloadSum+txSize", payloadSum+txSize)
|
||||
signedTxs = append(signedTxs, signedTx)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for i := len(signedTxs) - 1; i >= 0; i-- {
|
||||
if err := client.SendTransaction(context.Background(), signedTxs[i]); err != nil {
|
||||
return fmt.Errorf("failed to send transaction: %v", err)
|
||||
}
|
||||
fmt.Printf("Transaction with nonce %d sent\n", signedTxs[i].Nonce())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -302,8 +302,6 @@ func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
|
||||
}
|
||||
|
||||
// EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately.
|
||||
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
|
||||
// This needs to be adjusted in the future.
|
||||
func EstimateBlockL1CommitCalldataSize(b *encoding.Block) (uint64, error) {
|
||||
var size uint64
|
||||
for _, txData := range b.Transactions {
|
||||
|
||||
@@ -338,7 +338,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash,
|
||||
}
|
||||
|
||||
// compute blob versioned hash
|
||||
c, err := kzg4844.BlobToCommitment(*blob)
|
||||
c, err := kzg4844.BlobToCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
|
||||
}
|
||||
@@ -364,7 +364,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash,
|
||||
func makeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
|
||||
// blob contains 131072 bytes but we can only utilize 31/32 of these
|
||||
if len(blobBytes) > 126976 {
|
||||
return nil, fmt.Errorf("oversized batch payload")
|
||||
return nil, fmt.Errorf("oversized batch payload, blob bytes length: %v, max length: %v", len(blobBytes), 126976)
|
||||
}
|
||||
|
||||
// the canonical (padded) blob payload
|
||||
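For context on the 126976 limit above: an EIP-4844 blob holds 4096 field elements of 32 bytes each (131072 bytes total), and this encoding keeps the first byte of every field element zero so the value stays below the BLS12-381 scalar modulus. That leaves 31 usable bytes per element, i.e. 4096 × 31 = 126976 bytes of payload.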
@@ -435,12 +435,12 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
|
||||
return nil, errors.New("called BlobDataProof with empty z")
|
||||
}
|
||||
|
||||
commitment, err := kzg4844.BlobToCommitment(*b.blob)
|
||||
commitment, err := kzg4844.BlobToCommitment(b.blob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create blob commitment")
|
||||
}
|
||||
|
||||
proof, y, err := kzg4844.ComputeProof(*b.blob, *b.z)
|
||||
proof, y, err := kzg4844.ComputeProof(b.blob, *b.z)
|
||||
if err != nil {
|
||||
log.Crit("failed to create KZG proof at point", "err", err, "z", hex.EncodeToString(b.z[:]))
|
||||
}
|
||||
@@ -472,8 +472,7 @@ func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
|
||||
return paddedSize, nil
|
||||
return calculatePaddedBlobSize(metadataSize + chunkDataSize), nil
|
||||
}
|
||||
|
||||
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
|
||||
@@ -487,8 +486,7 @@ func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
|
||||
}
|
||||
batchDataSize += chunkDataSize
|
||||
}
|
||||
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
|
||||
return paddedSize, nil
|
||||
return calculatePaddedBlobSize(metadataSize + batchDataSize), nil
|
||||
}
|
||||
|
||||
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
|
||||
@@ -506,3 +504,134 @@ func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
|
||||
}
|
||||
return dataSize, nil
|
||||
}
|
||||
|
||||
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
|
||||
const CalldataNonZeroByteGas = 16
|
||||
|
||||
// GetKeccak256Gas calculates the gas cost for computing the keccak256 hash of a given size.
|
||||
func GetKeccak256Gas(size uint64) uint64 {
|
||||
return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
|
||||
}
|
||||
|
||||
// GetMemoryExpansionCost calculates the cost of memory expansion for a given memoryByteSize.
|
||||
func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
|
||||
memorySizeWord := (memoryByteSize + 31) / 32
|
||||
memoryCost := (memorySizeWord*memorySizeWord)/512 + (3 * memorySizeWord)
|
||||
return memoryCost
|
||||
}
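As a worked example of these two helpers (the 58-byte input is chosen to match the single-block chunk-hash preimage used below): GetMemoryExpansionCost(58) rounds up to ⌈58/32⌉ = 2 words, giving (2·2)/512 + 3·2 = 6 gas, so GetKeccak256Gas(58) = 6 + 30 + 6·2 = 48 gas.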
|
||||
|
||||
// EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately.
|
||||
func EstimateBlockL1CommitGas(b *encoding.Block) uint64 {
|
||||
var total uint64
|
||||
var numL1Messages uint64
|
||||
for _, txData := range b.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
numL1Messages++
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// 60 bytes BlockContext calldata
|
||||
total += CalldataNonZeroByteGas * 60
|
||||
|
||||
// sload
|
||||
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
|
||||
|
||||
// staticcall
|
||||
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
|
||||
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
|
||||
|
||||
total += GetMemoryExpansionCost(36) * numL1Messages // staticcall to proxy
|
||||
total += 100 * numL1Messages // read admin in proxy
|
||||
total += 100 * numL1Messages // read impl in proxy
|
||||
total += 100 * numL1Messages // access impl
|
||||
total += GetMemoryExpansionCost(36) * numL1Messages // delegatecall to impl
|
||||
|
||||
return total
|
||||
}
|
||||
|
||||
// EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately.
|
||||
func EstimateChunkL1CommitCalldataSize(c *encoding.Chunk) uint64 {
|
||||
return uint64(60 * len(c.Blocks))
|
||||
}
|
||||
|
||||
// EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately.
|
||||
func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
|
||||
var totalNonSkippedL1Messages uint64
|
||||
var totalL1CommitGas uint64
|
||||
for _, block := range c.Blocks {
|
||||
totalNonSkippedL1Messages += uint64(len(block.Transactions)) - block.NumL2Transactions()
|
||||
blockL1CommitGas := EstimateBlockL1CommitGas(block)
|
||||
totalL1CommitGas += blockL1CommitGas
|
||||
}
|
||||
|
||||
numBlocks := uint64(len(c.Blocks))
|
||||
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
|
||||
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
|
||||
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
|
||||
|
||||
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalNonSkippedL1Messages) // chunk hash
|
||||
return totalL1CommitGas
|
||||
}
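A sketch of how this estimate composes, assuming a chunk with one block and no L1 messages (consistent with the 2084 expectation in the tests below): 16·60 = 960 for the block's BlockContext calldata, + 100 for the warm sload, + 16 for the numBlocks byte, + 16·60 = 960 for the BlockContext bytes counted again at the chunk level, + GetKeccak256Gas(58) = 48 for the chunk hash, totalling 2084.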
|
||||
|
||||
// EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately.
|
||||
func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
|
||||
var totalL1CommitGas uint64
|
||||
|
||||
// Add extra gas costs
|
||||
totalL1CommitGas += 100000 // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc
|
||||
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
|
||||
totalL1CommitGas += 20000 // 1 time sstore
|
||||
totalL1CommitGas += 21000 // base fee for tx
|
||||
totalL1CommitGas += CalldataNonZeroByteGas // version in calldata
|
||||
|
||||
// adjusting gas:
|
||||
// add 1 time cold sload (2100 gas) for L1MessageQueue
|
||||
// add 1 time cold address access (2600 gas) for L1MessageQueue
|
||||
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
|
||||
totalL1CommitGas += (2100 + 2600 - 100 - 100)
|
||||
totalL1CommitGas += GetKeccak256Gas(89 + 32) // parent batch header hash, length is estimated as 89 (constant part)+ 32 (1 skippedL1MessageBitmap)
|
||||
totalL1CommitGas += CalldataNonZeroByteGas * (89 + 32) // parent batch header in calldata
|
||||
|
||||
// adjust batch data hash gas cost
|
||||
totalL1CommitGas += GetKeccak256Gas(uint64(32 * len(b.Chunks)))
|
||||
|
||||
totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore
|
||||
|
||||
for _, chunk := range b.Chunks {
|
||||
chunkL1CommitGas := EstimateChunkL1CommitGas(chunk)
|
||||
totalL1CommitGas += chunkL1CommitGas
|
||||
|
||||
totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore)
|
||||
totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk
|
||||
|
||||
totalL1CommitGas += CalldataNonZeroByteGas * (32 * (totalL1MessagePoppedInChunk + 255) / 256)
|
||||
totalL1CommitGas += GetKeccak256Gas(89 + 32*(totalL1MessagePoppedInChunk+255)/256)
|
||||
|
||||
totalL1CommitCalldataSize := EstimateChunkL1CommitCalldataSize(chunk)
|
||||
totalL1CommitGas += GetMemoryExpansionCost(totalL1CommitCalldataSize)
|
||||
}
|
||||
|
||||
return totalL1CommitGas
|
||||
}
|
||||
|
||||
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
|
||||
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) uint64 {
|
||||
var totalL1CommitCalldataSize uint64
|
||||
for _, chunk := range b.Chunks {
|
||||
totalL1CommitCalldataSize += EstimateChunkL1CommitCalldataSize(chunk)
|
||||
}
|
||||
return totalL1CommitCalldataSize
|
||||
}
|
||||
|
||||
// calculatePaddedBlobSize calculates the required size on blob storage
|
||||
// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
|
||||
func calculatePaddedBlobSize(dataSize uint64) uint64 {
|
||||
paddedSize := (dataSize / 31) * 32
|
||||
|
||||
if dataSize%31 != 0 {
|
||||
paddedSize += 1 + dataSize%31 // Add 1 byte for the first empty byte plus the remainder bytes
|
||||
}
|
||||
|
||||
return paddedSize
|
||||
}
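Worked example of the padding rule (the 292-byte input is inferred from the updated test expectation below, not read from the testdata): calculatePaddedBlobSize(292) = ⌊292/31⌋·32 + 1 + (292 mod 31) = 9·32 + 1 + 13 = 302, matching the new chunk2 blob-size expectation.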
|
||||
|
||||
@@ -592,7 +592,7 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
|
||||
actualZ := hex.EncodeToString(z[:])
|
||||
assert.Equal(t, tc.expectedz, actualZ)
|
||||
|
||||
_, y, err := kzg4844.ComputeProof(*b, *z)
|
||||
_, y, err := kzg4844.ComputeProof(b, *z)
|
||||
assert.NoError(t, err)
|
||||
actualY := hex.EncodeToString(y[:])
|
||||
assert.Equal(t, tc.expectedy, actualY)
|
||||
@@ -759,49 +759,121 @@ func TestCodecV1BatchSkipBitmap(t *testing.T) {
|
||||
assert.Equal(t, 42, int(batch.TotalL1MessagePopped))
|
||||
}
|
||||
|
||||
func TestCodecV1ChunkAndBatchBlobSizeEstimation(t *testing.T) {
|
||||
func TestCodecV1ChunkAndBatchCommitBlobSizeEstimation(t *testing.T) {
|
||||
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
|
||||
chunk2BlobSize, err := EstimateChunkL1CommitBlobSize(chunk2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(320), chunk2BlobSize)
|
||||
assert.Equal(t, uint64(302), chunk2BlobSize)
|
||||
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
|
||||
batch2BlobSize, err := EstimateBatchL1CommitBlobSize(batch2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(320), batch2BlobSize)
|
||||
assert.Equal(t, uint64(302), batch2BlobSize)
|
||||
|
||||
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
|
||||
chunk3BlobSize, err := EstimateChunkL1CommitBlobSize(chunk3)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(5952), chunk3BlobSize)
|
||||
assert.Equal(t, uint64(5929), chunk3BlobSize)
|
||||
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
|
||||
batch3BlobSize, err := EstimateBatchL1CommitBlobSize(batch3)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(5952), batch3BlobSize)
|
||||
assert.Equal(t, uint64(5929), batch3BlobSize)
|
||||
|
||||
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
|
||||
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
chunk4BlobSize, err := EstimateChunkL1CommitBlobSize(chunk4)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(128), chunk4BlobSize)
|
||||
assert.Equal(t, uint64(98), chunk4BlobSize)
|
||||
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
|
||||
batch4BlobSize, err := EstimateBatchL1CommitBlobSize(batch4)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(128), batch4BlobSize)
|
||||
assert.Equal(t, uint64(98), batch4BlobSize)
|
||||
|
||||
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
|
||||
chunk5BlobSize, err := EstimateChunkL1CommitBlobSize(chunk5)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6176), chunk5BlobSize)
|
||||
assert.Equal(t, uint64(6166), chunk5BlobSize)
|
||||
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
chunk6BlobSize, err := EstimateChunkL1CommitBlobSize(chunk6)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(128), chunk6BlobSize)
|
||||
assert.Equal(t, uint64(98), chunk6BlobSize)
|
||||
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
|
||||
batch5BlobSize, err := EstimateBatchL1CommitBlobSize(batch5)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6208), batch5BlobSize)
|
||||
assert.Equal(t, uint64(6199), batch5BlobSize)
|
||||
}
|
||||
|
||||
func TestCodecV1ChunkAndBatchCommitCalldataSizeEstimation(t *testing.T) {
|
||||
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
|
||||
chunk2CalldataSize := EstimateChunkL1CommitCalldataSize(chunk2)
|
||||
assert.Equal(t, uint64(60), chunk2CalldataSize)
|
||||
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
|
||||
batch2CalldataSize := EstimateBatchL1CommitCalldataSize(batch2)
|
||||
assert.Equal(t, uint64(60), batch2CalldataSize)
|
||||
|
||||
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
|
||||
chunk3CalldataSize := EstimateChunkL1CommitCalldataSize(chunk3)
|
||||
assert.Equal(t, uint64(60), chunk3CalldataSize)
|
||||
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
|
||||
batch3CalldataSize := EstimateBatchL1CommitCalldataSize(batch3)
|
||||
assert.Equal(t, uint64(60), batch3CalldataSize)
|
||||
|
||||
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
|
||||
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
chunk4CalldataSize := EstimateChunkL1CommitCalldataSize(chunk4)
|
||||
assert.Equal(t, uint64(60), chunk4CalldataSize)
|
||||
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
|
||||
batch4BlobSize := EstimateBatchL1CommitCalldataSize(batch4)
|
||||
assert.Equal(t, uint64(60), batch4BlobSize)
|
||||
|
||||
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
|
||||
chunk5CalldataSize := EstimateChunkL1CommitCalldataSize(chunk5)
|
||||
assert.Equal(t, uint64(120), chunk5CalldataSize)
|
||||
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
chunk6BlobSize := EstimateChunkL1CommitCalldataSize(chunk6)
|
||||
assert.Equal(t, uint64(60), chunk6BlobSize)
|
||||
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
|
||||
batch5CalldataSize := EstimateBatchL1CommitCalldataSize(batch5)
|
||||
assert.Equal(t, uint64(180), batch5CalldataSize)
|
||||
}
|
||||
|
||||
func TestCodecV1ChunkAndBatchCommitGasEstimation(t *testing.T) {
|
||||
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
|
||||
chunk2Gas := EstimateChunkL1CommitGas(chunk2)
|
||||
assert.Equal(t, uint64(2084), chunk2Gas)
|
||||
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
|
||||
batch2Gas := EstimateBatchL1CommitGas(batch2)
|
||||
assert.Equal(t, uint64(158609), batch2Gas)
|
||||
|
||||
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
|
||||
chunk3Gas := EstimateChunkL1CommitGas(chunk3)
|
||||
assert.Equal(t, uint64(2084), chunk3Gas)
|
||||
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
|
||||
batch3Gas := EstimateBatchL1CommitGas(batch3)
|
||||
assert.Equal(t, uint64(158609), batch3Gas)
|
||||
|
||||
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
|
||||
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
chunk4Gas := EstimateChunkL1CommitGas(chunk4)
|
||||
assert.Equal(t, uint64(4705), chunk4Gas)
|
||||
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
|
||||
batch4Gas := EstimateBatchL1CommitGas(batch4)
|
||||
assert.Equal(t, uint64(161262), batch4Gas)
|
||||
|
||||
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
|
||||
chunk5Gas := EstimateChunkL1CommitGas(chunk5)
|
||||
assert.Equal(t, uint64(4122), chunk5Gas)
|
||||
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
chunk6Gas := EstimateChunkL1CommitGas(chunk6)
|
||||
assert.Equal(t, uint64(4705), chunk6Gas)
|
||||
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
|
||||
batch5Gas := EstimateBatchL1CommitGas(batch5)
|
||||
assert.Equal(t, uint64(165967), batch5Gas)
|
||||
}
|
||||
|
||||
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
// CheckScrollProverVersion check the "scroll-prover" version, if it's different from the local one, return false
|
||||
func CheckScrollProverVersion(proverVersion string) bool {
|
||||
// note the the version is in fact in the format of "tag-commit-scroll_prover-halo2",
|
||||
// note the version is in fact in the format of "tag-commit-scroll_prover-halo2",
|
||||
// so split-by-'-' length should be 4
|
||||
remote := strings.Split(proverVersion, "-")
|
||||
if len(remote) != 4 {
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.3.92"
|
||||
var tag = "v4.4.6"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
@@ -67,7 +67,7 @@ Commit a batch of transactions on layer 1.
|
||||
function committedBatches(uint256) external view returns (bytes32)
|
||||
```
|
||||
|
||||
Return the batch hash of a committed batch.
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -81,7 +81,7 @@ Return the batch hash of a committed batch.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | bytes32 | undefined |
|
||||
| _0 | bytes32 | The batch hash of a committed batch. |
|
||||
|
||||
### finalizeBatchWithProof
|
||||
|
||||
@@ -130,7 +130,7 @@ Finalize a committed batch (with blob) on layer 1.
|
||||
function finalizedStateRoots(uint256) external view returns (bytes32)
|
||||
```
|
||||
|
||||
Return the state root of a committed batch.
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -144,7 +144,7 @@ Return the state root of a committed batch.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | bytes32 | undefined |
|
||||
| _0 | bytes32 | The state root of a committed batch. |
|
||||
|
||||
### importGenesisBatch
|
||||
|
||||
@@ -160,8 +160,8 @@ Import layer 2 genesis block
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _batchHeader | bytes | undefined |
|
||||
| _stateRoot | bytes32 | undefined |
|
||||
| _batchHeader | bytes | The header of the genesis batch. |
|
||||
| _stateRoot | bytes32 | The state root of the genesis block. |
|
||||
|
||||
### initialize
|
||||
|
||||
@@ -187,7 +187,7 @@ Initialize the storage of ScrollChain.
|
||||
function isBatchFinalized(uint256 _batchIndex) external view returns (bool)
|
||||
```
|
||||
|
||||
Return whether the batch is finalized by batch index.
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -201,7 +201,7 @@ Return whether the batch is finalized by batch index.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | bool | undefined |
|
||||
| _0 | bool | Whether the batch is finalized by batch index. |
|
||||
|
||||
### isProver
|
||||
|
||||
@@ -253,7 +253,7 @@ Whether an account is a sequencer.
|
||||
function lastFinalizedBatchIndex() external view returns (uint256)
|
||||
```
|
||||
|
||||
The latest finalized batch index.
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -262,7 +262,7 @@ The latest finalized batch index.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | uint256 | undefined |
|
||||
| _0 | uint256 | The latest finalized batch index. |
|
||||
|
||||
### layer2ChainId
|
||||
|
||||
@@ -480,7 +480,7 @@ The address of RollupVerifier.
|
||||
function withdrawRoots(uint256) external view returns (bytes32)
|
||||
```
|
||||
|
||||
Return the message root of a committed batch.
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -494,7 +494,7 @@ Return the message root of a committed batch.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | bytes32 | undefined |
|
||||
| _0 | bytes32 | The message root of a committed batch. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity =0.8.24;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
import {console} from "forge-std/console.sol";
|
||||
|
||||
import {Ecc} from "../../src/misc/ecc.sol";
|
||||
|
||||
contract DeployEcc is Script {
|
||||
function run() external {
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
Ecc ecc = new Ecc();
|
||||
address L2_ECC_ADDR = address(ecc);
|
||||
vm.stopBroadcast();
|
||||
|
||||
logAddress("L2_ECC_ADDR", L2_ECC_ADDR);
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal view {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity =0.8.24;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
import {console} from "forge-std/console.sol";
|
||||
|
||||
import {Hash} from "../../src/misc/hash.sol";
|
||||
|
||||
contract DeployHash is Script {
|
||||
function run() external {
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
Hash hash = new Hash();
|
||||
address L2_HASH_ADDR = address(hash);
|
||||
vm.stopBroadcast();
|
||||
|
||||
logAddress("L2_HASH_ADDR", L2_HASH_ADDR);
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal view {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
}
|
||||
@@ -92,12 +92,10 @@ contract DeployL1BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployMultipleVersionRollupVerifier() internal {
|
||||
uint256[] memory _versions = new uint256[](2);
|
||||
address[] memory _verifiers = new address[](2);
|
||||
uint256[] memory _versions = new uint256[](1);
|
||||
address[] memory _verifiers = new address[](1);
|
||||
_versions[0] = 0;
|
||||
_verifiers[0] = address(zkEvmVerifierV1);
|
||||
_versions[1] = 1;
|
||||
_verifiers[1] = address(zkEvmVerifierV1);
|
||||
rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);
|
||||
|
||||
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import * as dotenv from "dotenv";
|
||||
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
dotenv.config();
|
||||
|
||||
async function main() {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
|
||||
const l1ScrollMessengerAddress = process.env.L1_SCROLL_MESSENGER_PROXY_ADDR!;
|
||||
const l2EccContractAddress = process.env.L2_ECC_ADDR!;
|
||||
const payload = process.env.SKIPPED_TX_PAYLOAD!; // TODO: calc the payload, parse as bytes
|
||||
|
||||
const L1ScrollMessenger = await ethers.getContractAt("L1ScrollMessenger", l1ScrollMessengerAddress, deployer);
|
||||
|
||||
const tx = await L1ScrollMessenger.sendMessage(
|
||||
l2EccContractAddress, // address _to
|
||||
0, // uint256 _value
|
||||
payload, // bytes memory _message
|
||||
100000000 // uint256 _gasLimit
|
||||
);
|
||||
|
||||
console.log(`calling ${l2EccContractAddress} with payload from l1, hash:`, tx.hash);
|
||||
const receipt = await tx.wait();
|
||||
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
|
||||
}
|
||||
|
||||
// We recommend this pattern to be able to use async/await everywhere
|
||||
// and properly handle errors.
|
||||
main().catch((error) => {
|
||||
console.error(error);
|
||||
process.exitCode = 1;
|
||||
});
|
||||
@@ -11,7 +11,7 @@ import {IScrollERC20Upgradeable} from "../../libraries/token/IScrollERC20Upgrade
|
||||
/// @title L2ERC20Gateway
|
||||
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens on layer 2 and
|
||||
/// finalize deposit the tokens from layer 1.
|
||||
/// @dev The withdrawn tokens tokens will be burned directly. On finalizing deposit, the corresponding
|
||||
/// @dev The withdrawn tokens will be burned directly. On finalizing deposit, the corresponding
|
||||
/// tokens will be minted and transferred to the recipient.
|
||||
contract L2CustomERC20Gateway is L2ERC20Gateway {
|
||||
/**********
|
||||
|
||||
35
contracts/src/batch-bridge/BatchBridgeCodec.sol
Normal file
@@ -0,0 +1,35 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.24;
|
||||
|
||||
library BatchBridgeCodec {
|
||||
/// @dev Encode the `token` and `batchIndex` into a single `bytes32`.
|
||||
function encodeInitialNode(address token, uint64 batchIndex) internal pure returns (bytes32 node) {
|
||||
assembly {
|
||||
node := add(shl(96, token), batchIndex)
|
||||
}
|
||||
}
|
||||
|
||||
/// @dev Encode the `sender` and `amount` into a single `bytes32`.
|
||||
function encodeNode(address sender, uint96 amount) internal pure returns (bytes32 node) {
|
||||
assembly {
|
||||
node := add(shl(96, sender), amount)
|
||||
}
|
||||
}
|
||||
|
||||
/// @dev Decode `bytes32` `node` to `receiver` and `amount`.
|
||||
function decodeNode(bytes32 node) internal pure returns (address receiver, uint256 amount) {
|
||||
receiver = address(uint160(uint256(node) >> 96));
|
||||
amount = uint256(node) & 0xffffffffffffffffffffffff;
|
||||
}
|
||||
|
||||
/// @dev Compute `keccak256(concat(a, b))`.
|
||||
function hash(bytes32 a, bytes32 b) internal pure returns (bytes32 value) {
|
||||
// solhint-disable-next-line no-inline-assembly
|
||||
assembly {
|
||||
mstore(0x00, a)
|
||||
mstore(0x20, b)
|
||||
value := keccak256(0x00, 0x40)
|
||||
}
|
||||
}
|
||||
}
|
||||
424
contracts/src/batch-bridge/L1BatchBridgeGateway.sol
Normal file
@@ -0,0 +1,424 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.24;
|
||||
|
||||
import {AccessControlEnumerableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/AccessControlEnumerableUpgradeable.sol";
|
||||
import {ReentrancyGuardUpgradeable} from "@openzeppelin/contracts-upgradeable/security/ReentrancyGuardUpgradeable.sol";
|
||||
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
|
||||
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
|
||||
|
||||
import {IL1ERC20Gateway} from "../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import {IL1GatewayRouter} from "../L1/gateways/IL1GatewayRouter.sol";
|
||||
import {IL1MessageQueue} from "../L1/rollup/IL1MessageQueue.sol";
|
||||
import {IL1ScrollMessenger} from "../L1/IL1ScrollMessenger.sol";
|
||||
|
||||
import {BatchBridgeCodec} from "./BatchBridgeCodec.sol";
|
||||
import {L2BatchBridgeGateway} from "./L2BatchBridgeGateway.sol";
|
||||
|
||||
/// @title L1BatchBridgeGateway
|
||||
contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyGuardUpgradeable {
|
||||
using SafeERC20Upgradeable for IERC20Upgradeable;
|
||||
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when a user deposits tokens to this contract.
|
||||
/// @param sender The address of token sender.
|
||||
/// @param token The address of deposited token.
|
||||
/// @param batchIndex The batch index of current deposit.
|
||||
/// @param amount The amount of token deposited (including fee).
|
||||
/// @param fee The amount of fee charged.
|
||||
event Deposit(
|
||||
address indexed sender,
|
||||
address indexed token,
|
||||
uint256 indexed batchIndex,
|
||||
uint256 amount,
|
||||
uint256 fee
|
||||
);
|
||||
|
||||
/// @notice Emitted when a batch deposit is initiated.
|
||||
/// @param caller The address of the caller who initiated the deposit.
|
||||
/// @param l1Token The address of the token in L1 to deposit.
|
||||
/// @param batchIndex The index of current batch deposit.
|
||||
/// @param l2Token The address of the corresponding token in L2.
|
||||
event BatchDeposit(address indexed caller, address indexed l1Token, uint256 indexed batchIndex, address l2Token);
|
||||
|
||||
/**********
|
||||
* Errors *
|
||||
**********/
|
||||
|
||||
/// @dev Thrown when caller is not `messenger`.
|
||||
error ErrorCallerNotMessenger();
|
||||
|
||||
/// @dev Thrown when the deposited amount is smaller than `minAmountPerTx`.
|
||||
error ErrorDepositAmountTooSmall();
|
||||
|
||||
/// @dev Thrown when users try to deposit ETH with `depositERC20` method.
|
||||
error ErrorIncorrectMethodForETHDeposit();
|
||||
|
||||
/// @dev Thrown when the `msg.value` is not enough for batch deposit fee.
|
||||
error ErrorInsufficientMsgValueForBatchDepositFee();
|
||||
|
||||
/// @dev Thrown when the given new batch config is invalid.
|
||||
error ErrorInvalidBatchConfig();
|
||||
|
||||
/// @dev Thrown when no pending batch exists.
|
||||
error ErrorNoPendingBatch();
|
||||
|
||||
/// @dev Thrown when user deposits unsupported tokens.
|
||||
error ErrorTokenNotSupported();
|
||||
|
||||
/// @dev Thrown when ETH transfer failed.
|
||||
error ErrorTransferETHFailed();
|
||||
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
|
||||
/// @notice The role for batch deposit keeper.
|
||||
bytes32 public constant KEEPER_ROLE = keccak256("KEEPER_ROLE");
|
||||
|
||||
/// @notice The safe gas limit for batch bridge.
|
||||
uint256 private constant SAFE_BATCH_BRIDGE_GAS_LIMIT = 200000;
|
||||
|
||||
/// @notice The address of corresponding `L2BatchBridgeGateway` contract.
|
||||
address public immutable counterpart;
|
||||
|
||||
/// @notice The address of `L1GatewayRouter` contract.
|
||||
address public immutable router;
|
||||
|
||||
/// @notice The address of `L1ScrollMessenger` contract.
|
||||
address public immutable messenger;
|
||||
|
||||
/// @notice The address of `L1MessageQueue` contract.
|
||||
address public immutable queue;
|
||||
|
||||
/***********
|
||||
* Structs *
|
||||
***********/
|
||||
|
||||
/// @notice The config for batch token bridge.
|
||||
/// @dev Compiler will pack this into a single `bytes32`.
|
||||
/// @param feeAmountPerTx The amount of fee charged for each deposit.
|
||||
/// @param minAmountPerTx The minimum amount of token for each deposit.
|
||||
/// @param maxTxsPerBatch The maximum number of deposit in each batch.
|
||||
/// @param maxDelayPerBatch The maximum number of seconds to wait in each batch.
|
||||
/// @param safeBridgeGasLimit The safe bridge gas limit for bridging token from L1 to L2.
|
||||
struct BatchConfig {
|
||||
uint96 feeAmountPerTx;
|
||||
uint96 minAmountPerTx;
|
||||
uint16 maxTxsPerBatch;
|
||||
uint24 maxDelayPerBatch;
|
||||
uint24 safeBridgeGasLimit;
|
||||
}
|
||||
|
||||
/// @dev Compiler will pack this into two `bytes32`.
|
||||
/// @param amount The total amount of token to deposit in current batch.
|
||||
/// @param startTime The timestamp of the first deposit.
|
||||
/// @param numDeposits The total number of deposits in current batch.
|
||||
/// @param hash The hash of current batch.
|
||||
/// Suppose there are `n` deposits in current batch with `senders` and `amounts`. The hash is computed as
|
||||
/// ```text
|
||||
/// hash[0] = concat(token, batch_index)
|
||||
/// hash[i] = keccak(hash[i-1], concat(senders[i], amounts[i]))
|
||||
/// ```
|
||||
/// The type of `token` and `senders[i]` is `address`, while the type of `batch_index` and `amounts[i]` is `uint96`.
|
||||
/// Encoded this way, the hash of each batch is distinct across all tokens.
|
||||
struct BatchState {
|
||||
uint128 amount;
|
||||
uint64 startTime;
|
||||
uint64 numDeposits;
|
||||
bytes32 hash;
|
||||
}
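To make the hash-chain definition above concrete, here is a minimal off-chain sketch in Go of how a keeper might recompute a batch hash (and the `nodes` later passed to `L2BatchBridgeGateway.distribute`). It assumes the scroll-tech go-ethereum fork used elsewhere in this repository; the helper names are illustrative and not part of the codebase, and the amounts must be the per-deposit amounts net of `feeAmountPerTx`, as recorded by `_deposit` below.
package batchbridge
import (
	"encoding/binary"
	"math/big"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)
// encodeInitialNode mirrors BatchBridgeCodec.encodeInitialNode:
// token in the top 20 bytes, batchIndex in the low 96 bits.
func encodeInitialNode(token common.Address, batchIndex uint64) common.Hash {
	var node common.Hash
	copy(node[:20], token[:])
	binary.BigEndian.PutUint64(node[24:], batchIndex)
	return node
}
// encodeNode mirrors BatchBridgeCodec.encodeNode:
// sender in the top 20 bytes, the uint96 amount in the low 12 bytes.
func encodeNode(sender common.Address, amount *big.Int) common.Hash {
	var node common.Hash
	copy(node[:20], sender[:])
	amount.FillBytes(node[20:]) // panics if amount does not fit in 96 bits
	return node
}
// batchHash folds the deposits into the hash chain described in BatchState:
// hash[0] = concat(token, batchIndex), hash[i] = keccak(hash[i-1], node[i]).
func batchHash(token common.Address, batchIndex uint64, senders []common.Address, amounts []*big.Int) common.Hash {
	h := encodeInitialNode(token, batchIndex)
	for i := range senders {
		node := encodeNode(senders[i], amounts[i])
		h = crypto.Keccak256Hash(h[:], node[:])
	}
	return h
}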
|
||||
|
||||
/// @dev Compiler will pack this into a single `bytes32`.
|
||||
/// @param pending The total amount of token pending to bridge.
|
||||
/// @param currentBatchIndex The index of current batch.
|
||||
/// @param pendingBatchIndex The index of pending batch (next batch to bridge).
|
||||
struct TokenState {
|
||||
uint128 pending;
|
||||
uint64 currentBatchIndex;
|
||||
uint64 pendingBatchIndex;
|
||||
}
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @notice Mapping from token address to batch bridge config.
|
||||
/// @dev The `address(0)` is used for ETH.
|
||||
mapping(address => BatchConfig) public configs;
|
||||
|
||||
/// @notice Mapping from token address to batch index to batch state.
|
||||
/// @dev The `address(0)` is used for ETH.
|
||||
mapping(address => mapping(uint256 => BatchState)) public batches;
|
||||
|
||||
/// @notice Mapping from token address to token state.
|
||||
/// @dev The `address(0)` is used for ETH.
|
||||
mapping(address => TokenState) public tokens;
|
||||
|
||||
/// @notice The address of fee vault.
|
||||
address public feeVault;
|
||||
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
/// @param _counterpart The address of `L2BatchBridgeGateway` contract in L2.
|
||||
/// @param _router The address of `L1GatewayRouter` contract in L1.
|
||||
/// @param _messenger The address of `L1ScrollMessenger` contract in L1.
|
||||
/// @param _queue The address of `L1MessageQueue` contract in L1.
|
||||
constructor(
|
||||
address _counterpart,
|
||||
address _router,
|
||||
address _messenger,
|
||||
address _queue
|
||||
) {
|
||||
_disableInitializers();
|
||||
|
||||
counterpart = _counterpart;
|
||||
router = _router;
|
||||
messenger = _messenger;
|
||||
queue = _queue;
|
||||
}
|
||||
|
||||
/// @notice Initialize the storage of `L1BatchBridgeGateway`.
|
||||
/// @param _feeVault The address of fee vault contract.
|
||||
function initialize(address _feeVault) external initializer {
|
||||
__Context_init(); // from ContextUpgradeable
|
||||
__ERC165_init(); // from ERC165Upgradeable
|
||||
__AccessControl_init(); // from AccessControlUpgradeable
|
||||
__AccessControlEnumerable_init(); // from AccessControlEnumerableUpgradeable
|
||||
__ReentrancyGuard_init(); // from ReentrancyGuardUpgradeable
|
||||
|
||||
feeVault = _feeVault;
|
||||
_grantRole(DEFAULT_ADMIN_ROLE, _msgSender());
|
||||
}
|
||||
|
||||
/*****************************
|
||||
* Public Mutating Functions *
|
||||
*****************************/
|
||||
|
||||
/// @notice Receive refunded ETH from `L1ScrollMessenger`.
|
||||
receive() external payable {
|
||||
if (_msgSender() != messenger) {
|
||||
revert ErrorCallerNotMessenger();
|
||||
}
|
||||
}
|
||||
|
||||
/// @notice Deposit ETH.
|
||||
function depositETH() external payable nonReentrant {
|
||||
// no safe cast check here, since no one has so much ETH yet.
|
||||
_deposit(address(0), _msgSender(), uint96(msg.value));
|
||||
}
|
||||
|
||||
/// @notice Deposit ERC20 token.
|
||||
///
|
||||
/// @param token The address of token.
|
||||
/// @param amount The amount of token to deposit. We use type `uint96`, since it is enough for most of the major tokens.
|
||||
function depositERC20(address token, uint96 amount) external nonReentrant {
|
||||
if (token == address(0)) revert ErrorIncorrectMethodForETHDeposit();
|
||||
|
||||
// common practice to handle fee-on-transfer tokens.
|
||||
uint256 beforeBalance = IERC20Upgradeable(token).balanceOf(address(this));
|
||||
IERC20Upgradeable(token).safeTransferFrom(_msgSender(), address(this), amount);
|
||||
amount = uint96(IERC20Upgradeable(token).balanceOf(address(this)) - beforeBalance);
|
||||
|
||||
_deposit(token, _msgSender(), amount);
|
||||
}
|
||||
|
||||
/************************
|
||||
* Restricted Functions *
|
||||
************************/
|
||||
|
||||
/// @notice Add or update the batch bridge config for the given token.
|
||||
///
|
||||
/// @dev The caller should make sure `safeBridgeGasLimit` is enough for batch bridging.
|
||||
///
|
||||
/// @param token The address of token to update.
|
||||
/// @param newConfig The new config.
|
||||
function setBatchConfig(address token, BatchConfig calldata newConfig) external onlyRole(DEFAULT_ADMIN_ROLE) {
|
||||
if (
|
||||
newConfig.maxTxsPerBatch == 0 ||
|
||||
newConfig.maxDelayPerBatch == 0 ||
|
||||
newConfig.feeAmountPerTx > newConfig.minAmountPerTx
|
||||
) {
|
||||
revert ErrorInvalidBatchConfig();
|
||||
}
|
||||
configs[token] = newConfig;
|
||||
}
|
||||
|
||||
/// @notice Initiate the batch bridge of current pending batch.
|
||||
/// @param token The address of the token.
|
||||
function executeBatchDeposit(address token) external payable onlyRole(KEEPER_ROLE) {
|
||||
BatchConfig memory cachedBatchConfig = configs[token];
|
||||
TokenState memory cachedTokenState = tokens[token];
|
||||
_tryFinalizeCurrentBatch(token, cachedBatchConfig, cachedTokenState);
|
||||
|
||||
// no batch to bridge
|
||||
if (cachedTokenState.currentBatchIndex == cachedTokenState.pendingBatchIndex) {
|
||||
revert ErrorNoPendingBatch();
|
||||
}
|
||||
|
||||
// check bridge fee
|
||||
uint256 depositFee = IL1MessageQueue(queue).estimateCrossDomainMessageFee(cachedBatchConfig.safeBridgeGasLimit);
|
||||
uint256 batchBridgeFee = IL1MessageQueue(queue).estimateCrossDomainMessageFee(SAFE_BATCH_BRIDGE_GAS_LIMIT);
|
||||
if (msg.value < depositFee + batchBridgeFee) {
|
||||
revert ErrorInsufficientMsgValueForBatchDepositFee();
|
||||
}
|
||||
|
||||
// take accumulated fee to fee vault
|
||||
uint256 accumulatedFee;
|
||||
if (token == address(0)) {
|
||||
// no unchecked block here, just in case
|
||||
accumulatedFee = address(this).balance - msg.value - cachedTokenState.pending;
|
||||
} else {
|
||||
// no unchecked block here, just in case
|
||||
accumulatedFee = IERC20Upgradeable(token).balanceOf(address(this)) - cachedTokenState.pending;
|
||||
}
|
||||
if (accumulatedFee > 0) {
|
||||
_transferToken(token, feeVault, accumulatedFee);
|
||||
}
|
||||
|
||||
// deposit token to L2
|
||||
BatchState memory cachedBatchState = batches[token][cachedTokenState.pendingBatchIndex];
|
||||
address l2Token;
|
||||
if (token == address(0)) {
|
||||
IL1ScrollMessenger(messenger).sendMessage{value: cachedBatchState.amount + depositFee}(
|
||||
counterpart,
|
||||
cachedBatchState.amount,
|
||||
new bytes(0),
|
||||
cachedBatchConfig.safeBridgeGasLimit
|
||||
);
|
||||
} else {
|
||||
address gateway = IL1GatewayRouter(router).getERC20Gateway(token);
|
||||
l2Token = IL1ERC20Gateway(gateway).getL2ERC20Address(token);
|
||||
IERC20Upgradeable(token).safeApprove(gateway, 0);
|
||||
IERC20Upgradeable(token).safeApprove(gateway, cachedBatchState.amount);
|
||||
IL1ERC20Gateway(gateway).depositERC20{value: depositFee}(
|
||||
token,
|
||||
counterpart,
|
||||
cachedBatchState.amount,
|
||||
cachedBatchConfig.safeBridgeGasLimit
|
||||
);
|
||||
}
|
||||
|
||||
// notify `L2BatchBridgeGateway`
|
||||
IL1ScrollMessenger(messenger).sendMessage{value: batchBridgeFee}(
|
||||
counterpart,
|
||||
0,
|
||||
abi.encodeCall(
|
||||
L2BatchBridgeGateway.finalizeBatchDeposit,
|
||||
(token, l2Token, cachedTokenState.pendingBatchIndex, cachedBatchState.hash)
|
||||
),
|
||||
SAFE_BATCH_BRIDGE_GAS_LIMIT
|
||||
);
|
||||
|
||||
emit BatchDeposit(_msgSender(), token, cachedTokenState.pendingBatchIndex, l2Token);
|
||||
|
||||
// update token state
|
||||
unchecked {
|
||||
cachedTokenState.pending -= uint128(cachedBatchState.amount);
|
||||
cachedTokenState.pendingBatchIndex += 1;
|
||||
}
|
||||
tokens[token] = cachedTokenState;
|
||||
|
||||
// refund keeper fee
|
||||
unchecked {
|
||||
if (msg.value > depositFee + batchBridgeFee) {
|
||||
_transferToken(address(0), _msgSender(), msg.value - depositFee - batchBridgeFee);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @dev Internal function to deposit token.
|
||||
/// @param token The address of token to deposit.
|
||||
/// @param sender The address of token sender.
|
||||
/// @param amount The amount of token to deposit.
|
||||
function _deposit(
|
||||
address token,
|
||||
address sender,
|
||||
uint96 amount
|
||||
) internal {
|
||||
BatchConfig memory cachedBatchConfig = configs[token];
|
||||
TokenState memory cachedTokenState = tokens[token];
|
||||
_tryFinalizeCurrentBatch(token, cachedBatchConfig, cachedTokenState);
|
||||
|
||||
if (amount < cachedBatchConfig.minAmountPerTx) {
|
||||
revert ErrorDepositAmountTooSmall();
|
||||
}
|
||||
BatchState memory cachedBatchState = batches[token][cachedTokenState.currentBatchIndex];
|
||||
|
||||
emit Deposit(sender, token, cachedTokenState.currentBatchIndex, amount, cachedBatchConfig.feeAmountPerTx);
|
||||
|
||||
// deduct fee and update cached state
|
||||
unchecked {
|
||||
amount -= cachedBatchConfig.feeAmountPerTx;
|
||||
cachedTokenState.pending += amount;
|
||||
cachedBatchState.amount += amount;
|
||||
cachedBatchState.numDeposits += 1;
|
||||
}
|
||||
|
||||
// compute the hash chain
|
||||
bytes32 node = BatchBridgeCodec.encodeNode(sender, amount);
|
||||
if (cachedBatchState.hash == bytes32(0)) {
|
||||
bytes32 initialNode = BatchBridgeCodec.encodeInitialNode(token, cachedTokenState.currentBatchIndex);
|
||||
// this is first tx in this batch
|
||||
cachedBatchState.hash = BatchBridgeCodec.hash(initialNode, node);
|
||||
cachedBatchState.startTime = uint64(block.timestamp);
|
||||
} else {
|
||||
cachedBatchState.hash = BatchBridgeCodec.hash(cachedBatchState.hash, node);
|
||||
}
|
||||
|
||||
batches[token][cachedTokenState.currentBatchIndex] = cachedBatchState;
|
||||
tokens[token] = cachedTokenState;
|
||||
}
|
||||
|
||||
/// @dev Internal function to finalize current batch.
|
||||
/// This function may change the value of `cachedTokenState`, which callers can rely on in later operations.
|
||||
/// @param token The address of token to finalize.
|
||||
/// @param cachedBatchConfig The cached batch config in memory.
|
||||
/// @param cachedTokenState The cached token state in memory.
|
||||
function _tryFinalizeCurrentBatch(
|
||||
address token,
|
||||
BatchConfig memory cachedBatchConfig,
|
||||
TokenState memory cachedTokenState
|
||||
) internal view {
|
||||
if (cachedBatchConfig.maxTxsPerBatch == 0) {
|
||||
revert ErrorTokenNotSupported();
|
||||
}
|
||||
BatchState memory cachedBatchState = batches[token][cachedTokenState.currentBatchIndex];
|
||||
// return if it is the very first deposit in the current batch
|
||||
if (cachedBatchState.numDeposits == 0) return;
|
||||
|
||||
// finalize the current batch when `maxTxsPerBatch` or `maxDelayPerBatch` is reached.
|
||||
if (
|
||||
cachedBatchState.numDeposits == cachedBatchConfig.maxTxsPerBatch ||
|
||||
block.timestamp - cachedBatchState.startTime > cachedBatchConfig.maxDelayPerBatch
|
||||
) {
|
||||
cachedTokenState.currentBatchIndex += 1;
|
||||
}
|
||||
}
|
||||
|
||||
/// @dev Internal function to transfer token, including ETH.
|
||||
/// @param token The address of token.
|
||||
/// @param receiver The address of token receiver.
|
||||
/// @param amount The amount of token to transfer.
|
||||
function _transferToken(
|
||||
address token,
|
||||
address receiver,
|
||||
uint256 amount
|
||||
) private {
|
||||
if (token == address(0)) {
|
||||
(bool success, ) = receiver.call{value: amount}("");
|
||||
if (!success) revert ErrorTransferETHFailed();
|
||||
} else {
|
||||
IERC20Upgradeable(token).safeTransfer(receiver, amount);
|
||||
}
|
||||
}
|
||||
}
|
||||
246
contracts/src/batch-bridge/L2BatchBridgeGateway.sol
Normal file
@@ -0,0 +1,246 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.24;
|
||||
|
||||
import {AccessControlEnumerableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/AccessControlEnumerableUpgradeable.sol";
|
||||
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
|
||||
|
||||
import {IL2ScrollMessenger} from "../L2/IL2ScrollMessenger.sol";
|
||||
import {BatchBridgeCodec} from "./BatchBridgeCodec.sol";
|
||||
|
||||
/// @title L2BatchBridgeGateway
|
||||
contract L2BatchBridgeGateway is AccessControlEnumerableUpgradeable {
|
||||
/**********
|
||||
* Events *
|
||||
**********/
|
||||
|
||||
/// @notice Emitted when token mapping for ERC20 token is updated.
|
||||
/// @param l2Token The address of corresponding ERC20 token in layer 2.
|
||||
/// @param oldL1Token The address of the old corresponding ERC20 token in layer 1.
|
||||
/// @param newL1Token The address of the new corresponding ERC20 token in layer 1.
|
||||
event UpdateTokenMapping(address indexed l2Token, address indexed oldL1Token, address indexed newL1Token);
|
||||
|
||||
/// @notice Emitted when batch bridge is finalized.
|
||||
/// @param l1Token The address of token in L1.
|
||||
/// @param l2Token The address of token in L2.
|
||||
/// @param batchIndex The index of batch finalized.
|
||||
event FinalizeBatchDeposit(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
|
||||
|
||||
/// @notice Emitted when batch distribution finished.
|
||||
/// @param l1Token The address of token in L1.
|
||||
/// @param l2Token The address of token in L2.
|
||||
/// @param batchIndex The index of batch distributed.
|
||||
event BatchDistribute(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
|
||||
|
||||
/// @notice Emitted when a token distribution failed.
|
||||
/// @param l2Token The address of token in L2.
|
||||
/// @param batchIndex The index of the batch.
|
||||
/// @param receiver The address of token receiver.
|
||||
/// @param amount The amount of token to distribute.
|
||||
event DistributeFailed(address indexed l2Token, uint256 indexed batchIndex, address receiver, uint256 amount);
|
||||
|
||||
/**********
|
||||
* Errors *
|
||||
**********/
|
||||
|
||||
/// @dev Thrown when caller is not `messenger`.
|
||||
error ErrorCallerNotMessenger();
|
||||
|
||||
/// @dev Thrown when the stored L1 token mapping mismatches the one passed to `finalizeBatchDeposit`.
|
||||
error ErrorL1TokenMismatched();
|
||||
|
||||
/// @dev Thrown when message sender is not `counterpart`.
|
||||
error ErrorMessageSenderNotCounterpart();
|
||||
|
||||
/// @dev Thrown when no failed distribution exists.
|
||||
error ErrorNoFailedDistribution();
|
||||
|
||||
/// @dev Thrown when the batch hash mismatch.
|
||||
error ErrorBatchHashMismatch();
|
||||
|
||||
/// @dev Thrown when distributing the same batch.
|
||||
error ErrorBatchDistributed();
|
||||
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
|
||||
/// @notice The role for batch deposit keeper.
|
||||
bytes32 public constant KEEPER_ROLE = keccak256("KEEPER_ROLE");
|
||||
|
||||
/// @notice The safe gas limit for ETH transfer
|
||||
uint256 private constant SAFE_ETH_TRANSFER_GAS_LIMIT = 50000;
|
||||
|
||||
/// @notice The address of corresponding `L1BatchBridgeGateway` contract.
|
||||
address public immutable counterpart;
|
||||
|
||||
/// @notice The address of corresponding `L2ScrollMessenger` contract.
|
||||
address public immutable messenger;
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @notice Mapping from l2 token address to l1 token address.
|
||||
mapping(address => address) public tokenMapping;
|
||||
|
||||
/// @notice Mapping from L2 token address to batch index to batch hash.
|
||||
mapping(address => mapping(uint256 => bytes32)) public batchHashes;
|
||||
|
||||
/// @notice Mapping from token address to the amount of failed distribution.
|
||||
mapping(address => uint256) public failedAmount;
|
||||
|
||||
/// @notice Mapping from batch hash to the distribution status.
|
||||
mapping(bytes32 => bool) public isDistributed;
|
||||
|
||||
/*************
|
||||
* Modifiers *
|
||||
*************/
|
||||
|
||||
modifier onlyMessenger() {
|
||||
if (_msgSender() != messenger) {
|
||||
revert ErrorCallerNotMessenger();
|
||||
}
|
||||
_;
|
||||
}
|
||||
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
/// @param _counterpart The address of `L1BatchBridgeGateway` contract in L1.
|
||||
/// @param _messenger The address of `L2ScrollMessenger` contract in L2.
|
||||
constructor(address _counterpart, address _messenger) {
|
||||
_disableInitializers();
|
||||
|
||||
counterpart = _counterpart;
|
||||
messenger = _messenger;
|
||||
}
|
||||
|
||||
/// @notice Initialize the storage of `L2BatchBridgeGateway`.
|
||||
function initialize() external initializer {
|
||||
__Context_init(); // from ContextUpgradeable
|
||||
__ERC165_init(); // from ERC165Upgradeable
|
||||
__AccessControl_init(); // from AccessControlUpgradeable
|
||||
__AccessControlEnumerable_init(); // from AccessControlEnumerableUpgradeable
|
||||
|
||||
_grantRole(DEFAULT_ADMIN_ROLE, _msgSender());
|
||||
}
|
||||
|
||||
/*****************************
|
||||
* Public Mutating Functions *
|
||||
*****************************/
|
||||
|
||||
/// @notice Receive batch bridged ETH from `L2ScrollMessenger`.
|
||||
receive() external payable onlyMessenger {
|
||||
// empty
|
||||
}
|
||||
|
||||
/// @notice Finalize L1 initiated batch token deposit.
|
||||
/// @param l1Token The address of the token in L1.
|
||||
/// @param l2Token The address of the token in L2.
|
||||
/// @param batchIndex The index of this batch bridge.
|
||||
/// @param hash The hash of this batch.
|
||||
function finalizeBatchDeposit(
|
||||
address l1Token,
|
||||
address l2Token,
|
||||
uint256 batchIndex,
|
||||
bytes32 hash
|
||||
) external onlyMessenger {
|
||||
if (counterpart != IL2ScrollMessenger(messenger).xDomainMessageSender()) {
|
||||
revert ErrorMessageSenderNotCounterpart();
|
||||
}
|
||||
|
||||
// trust the messenger and update `tokenMapping` in first call
|
||||
// another assumption is this function should never fail due to out of gas
|
||||
address storedL1Token = tokenMapping[l2Token];
|
||||
if (storedL1Token == address(0) && l1Token != address(0)) {
|
||||
tokenMapping[l2Token] = l1Token;
|
||||
} else if (storedL1Token != l1Token) {
|
||||
// this usually won't happen, check just in case.
|
||||
revert ErrorL1TokenMismatched();
|
||||
}
|
||||
|
||||
batchHashes[l2Token][batchIndex] = hash;
|
||||
|
||||
emit FinalizeBatchDeposit(l1Token, l2Token, batchIndex);
|
||||
}
|
||||
|
||||
/************************
|
||||
* Restricted Functions *
|
||||
************************/
|
||||
|
||||
/// @notice Withdraw tokens from failed distributions.
|
||||
/// @param token The address of token to withdraw.
|
||||
/// @param receiver The address of token receiver.
|
||||
function withdrawFailedAmount(address token, address receiver) external onlyRole(DEFAULT_ADMIN_ROLE) {
|
||||
uint256 amount = failedAmount[token];
|
||||
if (amount == 0) revert ErrorNoFailedDistribution();
|
||||
failedAmount[token] = 0;
|
||||
|
||||
_transferToken(token, receiver, amount);
|
||||
}
|
||||
|
||||
/// @notice Distribute deposited token to corresponding receivers.
|
||||
/// @param l2Token The address of L2 token.
|
||||
/// @param batchIndex The index of batch to distribute.
|
||||
/// @param nodes The list of encoded L1 deposits.
|
||||
function distribute(
|
||||
address l2Token,
|
||||
uint64 batchIndex,
|
||||
bytes32[] memory nodes
|
||||
) external onlyRole(KEEPER_ROLE) {
|
||||
address l1Token = tokenMapping[l2Token];
|
||||
bytes32 hash = BatchBridgeCodec.encodeInitialNode(l1Token, batchIndex);
|
||||
for (uint256 i = 0; i < nodes.length; i++) {
|
||||
hash = BatchBridgeCodec.hash(hash, nodes[i]);
|
||||
}
|
||||
if (batchHashes[l2Token][batchIndex] != hash) {
|
||||
revert ErrorBatchHashMismatch();
|
||||
}
|
||||
if (isDistributed[hash]) {
|
||||
revert ErrorBatchDistributed();
|
||||
}
|
||||
isDistributed[hash] = true;
|
||||
|
||||
// do the transfers and tolerate individual failures to avoid a DoS on the whole batch
|
||||
for (uint256 i = 0; i < nodes.length; i++) {
|
||||
(address receiver, uint256 amount) = BatchBridgeCodec.decodeNode(nodes[i]);
|
||||
if (!_transferToken(l2Token, receiver, amount)) {
|
||||
failedAmount[l2Token] += amount;
|
||||
|
||||
emit DistributeFailed(l2Token, batchIndex, receiver, amount);
|
||||
}
|
||||
}
|
||||
|
||||
emit BatchDistribute(l1Token, l2Token, batchIndex);
|
||||
}
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @dev Internal function to transfer token, including ETH.
|
||||
/// @param token The address of token.
|
||||
/// @param receiver The address of token receiver.
|
||||
/// @param amount The amount of token to transfer.
|
||||
/// @return success Whether the transfer is successful.
|
||||
function _transferToken(
|
||||
address token,
|
||||
address receiver,
|
||||
uint256 amount
|
||||
) private returns (bool success) {
|
||||
if (token == address(0)) {
|
||||
// We add a gas limit here to avoid DoS from a malicious receiver.
|
||||
(success, ) = receiver.call{value: amount, gas: SAFE_ETH_TRANSFER_GAS_LIMIT}("");
|
||||
} else {
|
||||
// We perform a low level call here, to bypass Solidity's return data size checking mechanism.
|
||||
// Normally, supported tokens are chosen such that this call does not revert unless it runs out of gas.
|
||||
bytes memory returnData;
|
||||
(success, returnData) = token.call(abi.encodeCall(IERC20Upgradeable.transfer, (receiver, amount)));
|
||||
if (success && returnData.length > 0) {
|
||||
success = abi.decode(returnData, (bool));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -79,7 +79,7 @@ library BatchHeaderV0Codec {
|
||||
|
||||
/// @notice Get the number of L1 messages popped before this batch.
|
||||
/// @param batchPtr The start memory offset of the batch header in memory.
|
||||
/// @return _totalL1MessagePopped The the number of L1 messages popped before this batch.
|
||||
/// @return _totalL1MessagePopped The number of L1 messages popped before this batch.
|
||||
function getTotalL1MessagePopped(uint256 batchPtr) internal pure returns (uint256 _totalL1MessagePopped) {
|
||||
assembly {
|
||||
_totalL1MessagePopped := shr(192, mload(add(batchPtr, 17)))
|
||||
|
||||
@@ -79,7 +79,7 @@ library BatchHeaderV1Codec {
|
||||
|
||||
/// @notice Get the number of L1 messages popped before this batch.
|
||||
/// @param batchPtr The start memory offset of the batch header in memory.
|
||||
/// @return _totalL1MessagePopped The the number of L1 messages popped before this batch.
|
||||
/// @return _totalL1MessagePopped The number of L1 messages popped before this batch.
|
||||
function getTotalL1MessagePopped(uint256 batchPtr) internal pure returns (uint256 _totalL1MessagePopped) {
|
||||
assembly {
|
||||
_totalL1MessagePopped := shr(192, mload(add(batchPtr, 17)))
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.