Compare commits


53 Commits

Author SHA1 Message Date
Mengran Lan
4270d34aa9 tmp changes for local test 2024-06-03 19:58:03 +08:00
Mengran Lan
89614c8dcb upgrade legacy_vk vkeys to v0.10.3 2024-06-03 16:32:26 +08:00
Mengran Lan
f9842621bc fix test issue 2024-06-03 16:32:26 +08:00
Mengran Lan
e8643b5206 no longer send acceptversions when login 2024-06-03 16:32:26 +08:00
Mengran Lan
2407db9f71 coordinator support multi-circuits prover 2024-06-03 16:32:26 +08:00
marlowl
6b11e20ca6 fix(test): context propagation fix in InsertBatch function (#1346)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-06-02 21:06:32 +08:00
Xi Lin
f12e8e3baf feat(contracts): change MAX_COMMIT_SCALAR and MAX_BLOB_SCALAR to 1e18 (#1354) 2024-06-01 10:42:07 +08:00
colin
ba77a74743 feat(rollup-relayer): support codecv2 (#1298)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: Zhang Zhuo <mycinbrin@gmail.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-05-30 19:00:40 +08:00
colin
1ddfe57e5b feat(gas-oracle): add gas price update after Curie (#1344) 2024-05-29 17:55:06 +08:00
Péter Garamvölgyi
c48ae961a5 feat(contracts): accept batches with version > 1 (#1317)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Xi Lin <zimpha@gmail.com>
2024-05-29 11:38:32 +08:00
Xi Lin
7059ad0ed4 feat(contracts): update L1GasPriceOracle for Curie fork (#1343)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2024-05-28 13:36:56 +08:00
JayLiu
07b8ae20ac change pos l1 listen port (#1339)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2024-05-19 20:43:05 +08:00
colin
934ea33443 fix(sender): nonce too low error catch (#1337)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-05-17 17:57:39 +08:00
georgehao
f2fd8963a9 feat(bridge-history): bridge batch deposit (#1310)
Co-authored-by: zimpha <zimpha@gmail.com>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2024-05-17 17:42:05 +08:00
cario-dev
e64bb7725f ci: upgrade checkout action to version with node20 (#1336)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-05-15 07:31:04 +08:00
Sina Pilehchiha
c8cfa9c15d Fix batch deposit (#1334) 2024-05-14 16:51:11 -04:00
Xin.Zh
612e66ebc2 move common/docker to common/testcontainers/docker (#1323)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-05-08 22:40:26 +08:00
colin
1c7c30c9ae feat(sender): add min gas tip (#1331) 2024-05-08 12:52:37 +08:00
colin
320ab56d1d feat(codecv1): add commit gas and calldata size estimation (#1324)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-05-07 16:35:49 +08:00
georgehao
54415f6a78 feat: fix start poS failed (#1330) 2024-05-07 16:22:58 +08:00
joao
87c9c33bcc docs: update README.md to specify solc version and correct config paths (#1314)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-05-07 10:59:52 +08:00
georgehao
fdcd43a296 feat: fix lint failed (#1326) 2024-05-07 10:35:32 +08:00
lugosi
a1a7f25921 build: update golangci-lint (#1321)
Co-authored-by: kongfanfu <kongfanfu@bytedance.com>
2024-05-06 09:47:41 +08:00
JayLiu
8be70f0c80 fix start testcontainers fail bug (#1313) 2024-05-02 21:12:20 +08:00
JayLiu
d1bec53e50 test: replace l1GethContainer with poSL1Container (#1312)
Co-authored-by: TKTech660 <liujay48@gmail.com>
2024-04-30 16:25:22 +08:00
SamiAlHassan
0723b463c5 build: upgrade go-ethereum from scroll-v5.1.6 to scroll-v5.3.0 (#1304)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-30 16:25:12 +08:00
Xin.Zh
46b1ff3284 modify testcontainer (#1302)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-30 14:44:26 +08:00
alwayshang
a3635dba52 docs(coordinator): fix function name (#1297)
Signed-off-by: alwayshang <zhanghonghao@outlook.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-30 11:32:48 +08:00
colin
34ad8ca772 fix(batch-proposer): potential panic risk (#1311) 2024-04-30 08:46:13 +08:00
yanziseeker
8a2a2eb292 docs:clean repetitive words (#1309) 2024-04-28 16:42:48 +08:00
Andi
8ca89374a0 chore: remove repetitive words (#1289)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-28 14:10:21 +08:00
Xi Lin
8128526116 feat(contracts): batch token bridge (#1282) 2024-04-28 12:11:15 +08:00
Péter Garamvölgyi
9262e9af69 Reapply "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 5090b77655.
2024-04-28 11:25:10 +08:00
Péter Garamvölgyi
5090b77655 Revert "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 8f8f6eb1a1.
2024-04-28 10:54:17 +08:00
colin
4cafc9349a feat(gas-oracle): tweak gas price update logic (#1305) 2024-04-28 10:47:04 +08:00
colin
ca6f856372 docs: remove local testing image Dockerfile and docs for mac M1/M2 silicon (#1300) 2024-04-25 08:32:57 +08:00
Mengran Lan
72ee087f35 fix(db): change prover_block_list's index on public_key from non-unique to unique (#1294)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2024-04-24 11:00:09 +08:00
sbaizet
8f8f6eb1a1 build(ci): make all intermediate images compatible with multi platforms (#1291) 2024-04-23 17:20:03 +08:00
Péter Garamvölgyi
c56bda9f47 feat(gas-oracle): relay blob base fee after Bernoulli block (#1293) 2024-04-23 16:53:39 +08:00
Péter Garamvölgyi
433d5c2f52 feat: publish rollup-db-cli image (#1280)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-22 20:19:15 +08:00
georgehao
69c0f7ed75 feat: upgarde gomonkey version (#1290) 2024-04-22 16:03:41 +08:00
colin
c25b827666 docs: add bridge-history deployment in readme (#1281)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-22 14:38:39 +08:00
georgehao
da4f6818e3 feat: upgrade scroll to go1.21 (#1285)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-04-22 14:35:50 +08:00
georgehao
200ca7c15b fix(coordinator): fix coordinator cron exit issue (#1286)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-22 09:26:53 +08:00
Mengran Lan
53cf26597d perf(db): add indexes for table prover_task, columns respectively: task_id, created_at (#1283) 2024-04-19 16:54:13 +08:00
sbaizet
f0f7341271 ci - Docker support for arm64 on all services (#1278) 2024-04-16 10:06:29 +02:00
sbaizet
b6af88c936 ci - Docker support for arm64 (#1276) 2024-04-15 16:18:25 +02:00
sbaizet
de541a650a ci - fix github action to support arm64 platform (#1275) 2024-04-15 15:35:18 +02:00
colin
d7a57235d3 fix(rollup-relayer): tweak logs (#1274)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-15 14:07:26 +08:00
sbaizet
91d21301ec ci: add support for arm64 on event-watcher images (#1269) 2024-04-14 16:27:59 +08:00
Daniel Helm
4b32a44a70 docs(coordinator): fix internal links to config files in README (#1272) 2024-04-14 16:27:17 +08:00
georgehao
55b400c5fb fix: rollup make lint failure (#1273) 2024-04-14 16:26:35 +08:00
JayLiu
1b49091207 test: fix testcontainers listen ports (#1270)
Co-authored-by: liuyuecai <liuyuecai1995@gmail.com>
2024-04-13 13:30:22 +08:00
193 changed files with 6837 additions and 6438 deletions

View File

@@ -34,7 +34,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
@@ -48,7 +48,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Test
run: |
make test
@@ -67,7 +67,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge-history-api/ -w .

View File

@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: ${{ github.head_ref }}
- name: check diff

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
@@ -37,7 +37,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
@@ -56,7 +56,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -81,7 +81,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

View File

@@ -31,7 +31,7 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
submodules: recursive
@@ -98,7 +98,7 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
submodules: recursive

View File

@@ -33,15 +33,15 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Lint
working-directory: 'coordinator'
run: |
@@ -54,9 +54,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -77,7 +77,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# uses: actions/checkout@v4
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Build and push
@@ -95,9 +95,9 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

View File

@@ -34,7 +34,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Lint
working-directory: 'database'
run: |
@@ -49,7 +49,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -74,7 +74,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

View File

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -46,6 +46,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -57,7 +58,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -90,6 +91,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -101,7 +103,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -134,6 +136,52 @@ jobs:
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: rollup-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: rollup-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/db_cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -145,7 +193,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -178,6 +226,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -189,7 +238,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -222,6 +271,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -233,7 +283,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -266,6 +316,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -277,7 +328,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -310,6 +361,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

View File

@@ -24,7 +24,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

View File

@@ -7,12 +7,12 @@ on:
description: 'Go version'
required: true
type: string
default: '1.20'
default: '1.21'
RUST_VERSION:
description: 'Rust toolchain version'
required: true
type: string
default: 'nightly-2022-12-10'
default: 'nightly-2023-12-03'
PYTHON_VERSION:
description: 'Python version'
required: false
@@ -29,31 +29,191 @@ defaults:
working-directory: 'build/dockerfiles/intermediate'
jobs:
build-and-push:
build-and-publish-cuda-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build
run: |
make all
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Publish
run: |
make publish
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/cuda-go-rust-builder.Dockerfile
tags: scrolltech/cuda-go-rust-builder:cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-alpine-builder:${{ github.event.inputs.GO_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-alpine-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-alpine-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-py-runner:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/py-runner.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/py-runner:${{ github.event.inputs.PYTHON_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

View File

@@ -34,7 +34,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
@@ -50,7 +50,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
@@ -58,7 +58,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
@@ -75,7 +75,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
@@ -89,7 +89,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover/ -w .

View File

@@ -36,7 +36,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
@@ -60,7 +60,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -85,7 +85,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
@@ -105,7 +105,7 @@ jobs:
- name: Test rollup packages
working-directory: 'rollup'
run: |
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
./run_test.sh
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
@@ -117,7 +117,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v2
# uses: actions/checkout@v4
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker

View File

@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.1.6
L2GETH_TAG=scroll-v5.3.0
help: ## Display this help message
@grep -h \
@@ -44,14 +44,8 @@ fmt: ## format the code
dev_docker: ## build docker images for development/testing usages
docker pull postgres
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app -e HOST_PATH=$(PWD) scroll_test_image
docker build -t scroll_l1geth ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth ./common/testcontainers/docker/l2geth/
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -41,47 +41,23 @@ docker pull postgres
make dev_docker
```
## Testing Rollup & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
## Unit Tests
Run the tests using the following commands:
```bash
export LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
export SCROLL_LIB_PATH=/scroll/lib
sudo mkdir -p $SCROLL_LIB_PATH
sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCROLL_LIB_PATH
export CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
### For Apple Silicon (M1/M2) Macs
To run tests on Apple Silicon Macs, build and execute the Docker image as outlined below:
#### Build a Docker Image for Testing
Use the following command to build a Docker image:
```bash
make build_test_docker
```
This command builds a Docker image named `scroll_test_image` using the Dockerfile found at `./build/dockerfiles/local_test.Dockerfile`.
#### Run Docker Image
After the image is built, run a Docker container from it:
```bash
make run_test_docker
```
This command runs a Docker container named `scroll_test_container` from the `scroll_test_image` image. The container uses the host network and has access to the Docker socket and the current directory.
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...

View File

@@ -79,3 +79,50 @@ provides REST APIs. Please refer to the API details below.
// @Success 200
// @Router /api/txsbyhashes [post]
```
## Running bridge-history-api locally
1. Pull the latest Redis image:
```
docker pull redis:latest
```
2. Run the Redis container:
```
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
```
3. Pull the latest PostgreSQL image:
```
docker pull postgres:latest
```
4. Run the PostgreSQL container:
```
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
```
5. Run database migrations to initialize the tables:
```
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli migrate
```
6. Run bridgehistoryapi-fetcher:
```
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```
7. Run bridgehistoryapi-api:
```
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```
The endpoints provided in [./conf/config.json](./conf/config.json) are all public endpoints and have rate limits.
For production usage:
- For L1 endpoints, utilizing a service provider's free tier should suffice.
- For L2 endpoints, consider [running a Scroll L2geth node](https://docs.scroll.xyz/en/developers/guides/running-a-scroll-node) and using the exposed HTTP port.
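Once both services are running, a quick smoke test against the documented `/api/txsbyhashes` endpoint confirms the API is reachable. This is a sketch: the listen port (shown as `8080`) is an assumption and should be taken from [./conf/config.json](./conf/config.json), and the `txs` request field is inferred from the `QueryByHashRequest` binding rather than spelled out above.
```
# Hypothetical port; replace 8080 with the value configured in ./conf/config.json
curl -s -X POST http://localhost:8080/api/txsbyhashes \
  -H "Content-Type: application/json" \
  -d '{"txs": ["0x<l1_or_l2_tx_hash>"]}'
```
A successful call returns HTTP 200 (per the `@Success 200` annotation above) with the matched transactions in the result payload.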

File diff suppressed because one or more lines are too long

View File

@@ -26,32 +26,27 @@ func init() {
Name: "reset",
Usage: "Clean and reset database.",
Action: resetDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "status",
Usage: "Check migration status.",
Action: checkDBStatus,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "version",
Usage: "Display the current database version.",
Action: dbVersion,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: migrateDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: rollbackDB,
Flags: []cli.Flag{
&utils.ConfigFileFlag,
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",

View File

@@ -15,9 +15,11 @@
"USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
"LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
"PufferGatewayAddr": "0xA033Ff09f2da45f0e9ae495f525363722Df42b2a",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"L2": {
"confirmation": 0,
@@ -34,7 +36,10 @@
"USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
"LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
"DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79"
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

View File

@@ -7,12 +7,12 @@ require (
github.com/gin-gonic/gin v1.9.1
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.6.0
gorm.io/gorm v1.25.5
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
require (
@@ -28,12 +28,12 @@ require (
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/cli v25.0.1+incompatible // indirect
github.com/docker/docker v25.0.3+incompatible // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
@@ -49,9 +49,7 @@ require (
github.com/go-playground/validator/v10 v10.15.5 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
@@ -72,10 +70,10 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
@@ -84,20 +82,19 @@ require (
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.7.1 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
@@ -109,14 +106,15 @@ require (
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.31.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

View File

@@ -59,8 +59,8 @@ github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7b
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -71,10 +71,10 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU=
github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible h1:xhVCHXq+P5LhT31+RuDuk0xXEbEnd50Fr37J1bGuyWg=
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v26.1.0+incompatible h1:W1G9MPNbskA6VZWL7b3ZljTh0pXI68FpINx0GKaOdaM=
github.com/docker/docker v26.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -147,7 +147,6 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -156,7 +155,6 @@ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -229,8 +227,6 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -239,6 +235,8 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -285,14 +283,14 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg=
github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
@@ -311,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
@@ -341,8 +339,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
@@ -385,19 +383,18 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
@@ -429,8 +426,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
@@ -443,10 +440,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -466,8 +461,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg=
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=

View File

@@ -29,6 +29,7 @@ type FetcherConfig struct {
ScrollChainAddr string `json:"ScrollChainAddr"`
GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"`
BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
}
// RedisConfig redis config

View File

@@ -8,8 +8,17 @@ import (
)
var (
// HistoryCtrler is controller instance
HistoryCtrler *HistoryController
// TxsByAddressCtl the TxsByAddressController instance
TxsByAddressCtl *TxsByAddressController
// TxsByHashesCtl the TxsByHashesController instance
TxsByHashesCtl *TxsByHashesController
// L2UnclaimedWithdrawalsByAddressCtl the L2UnclaimedWithdrawalsByAddressController instance
L2UnclaimedWithdrawalsByAddressCtl *L2UnclaimedWithdrawalsByAddressController
// L2WithdrawalsByAddressCtl the L2WithdrawalsByAddressController instance
L2WithdrawalsByAddressCtl *L2WithdrawalsByAddressController
initControllerOnce sync.Once
)
@@ -17,6 +26,9 @@ var (
// InitController inits Controller with database
func InitController(db *gorm.DB, redis *redis.Client) {
initControllerOnce.Do(func() {
HistoryCtrler = NewHistoryController(db, redis)
TxsByAddressCtl = NewTxsByAddressController(db, redis)
TxsByHashesCtl = NewTxsByHashesController(db, redis)
L2UnclaimedWithdrawalsByAddressCtl = NewL2UnclaimedWithdrawalsByAddressController(db, redis)
L2WithdrawalsByAddressCtl = NewL2WithdrawalsByAddressController(db, redis)
})
}

View File

@@ -1,94 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// HistoryController contains the query claimable txs service
type HistoryController struct {
historyLogic *logic.HistoryLogic
}
// NewHistoryController return HistoryController instance
func NewHistoryController(db *gorm.DB, redis *redis.Client) *HistoryController {
return &HistoryController{
historyLogic: logic.NewHistoryLogic(db, redis),
}
}
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
func (c *HistoryController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// GetL2WithdrawalsByAddress defines the http get method behavior
func (c *HistoryController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// GetTxsByAddress defines the http get method behavior
func (c *HistoryController) GetTxsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// PostQueryTxsByHashes defines the http post method behavior
func (c *HistoryController) PostQueryTxsByHashes(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
return
}
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -0,0 +1,40 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// L2UnclaimedWithdrawalsByAddressController the controller of GetL2UnclaimedWithdrawalsByAddress
type L2UnclaimedWithdrawalsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewL2UnclaimedWithdrawalsByAddressController create new L2UnclaimedWithdrawalsByAddressController
func NewL2UnclaimedWithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2UnclaimedWithdrawalsByAddressController {
return &L2UnclaimedWithdrawalsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
func (c *L2UnclaimedWithdrawalsByAddressController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -0,0 +1,40 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// L2WithdrawalsByAddressController is the controller for GetL2WithdrawalsByAddress
type L2WithdrawalsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewL2WithdrawalsByAddressController creates a new L2WithdrawalsByAddressController
func NewL2WithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2WithdrawalsByAddressController {
return &L2WithdrawalsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetL2WithdrawalsByAddress defines the HTTP GET method behavior
func (c *L2WithdrawalsByAddressController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -0,0 +1,40 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// TxsByAddressController is the controller for GetTxsByAddress
type TxsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewTxsByAddressController creates a new TxsByAddressController
func NewTxsByAddressController(db *gorm.DB, redisClient *redis.Client) *TxsByAddressController {
return &TxsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetTxsByAddress defines the HTTP GET method behavior
func (c *TxsByAddressController) GetTxsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -0,0 +1,40 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// TxsByHashesController is the controller for PostQueryTxsByHashes
type TxsByHashesController struct {
historyLogic *logic.HistoryLogic
}
// NewTxsByHashesController creates a new TxsByHashesController
func NewTxsByHashesController(db *gorm.DB, redisClient *redis.Client) *TxsByHashesController {
return &TxsByHashesController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// PostQueryTxsByHashes queries the txs by the given hashes
func (c *TxsByHashesController) PostQueryTxsByHashes(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
return
}
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -63,7 +63,7 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
// Start starts the L1 message fetching process.
func (c *L1MessageFetcher) Start() {
messageSyncedHeight, batchSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
if dbErr != nil {
log.Crit("L1MessageFetcher start failed", "err", dbErr)
}
@@ -72,6 +72,11 @@ func (c *L1MessageFetcher) Start() {
if batchSyncedHeight > l1SyncHeight {
l1SyncHeight = batchSyncedHeight
}
if bridgeBatchDepositSyncedHeight > l1SyncHeight {
l1SyncHeight = bridgeBatchDepositSyncedHeight
}
if c.cfg.StartHeight > l1SyncHeight {
l1SyncHeight = c.cfg.StartHeight - 1
}
@@ -91,7 +96,13 @@ func (c *L1MessageFetcher) Start() {
c.updateL1SyncHeight(l1SyncHeight, header.Hash())
log.Info("Start L1 message fetcher", "message synced height", messageSyncedHeight, "batch synced height", batchSyncedHeight, "config start height", c.cfg.StartHeight, "sync start height", c.l1SyncHeight+1)
log.Info("Start L1 message fetcher",
"message synced height", messageSyncedHeight,
"batch synced height", batchSyncedHeight,
"bridge batch deposit height", bridgeBatchDepositSyncedHeight,
"config start height", c.cfg.StartHeight,
"sync start height", c.l1SyncHeight+1,
)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
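
In words, Start resumes L1 fetching from the highest of the three persisted heights (cross message, batch event, bridge batch deposit) and then lets a higher configured start height override it. A standalone sketch of that selection, with a hypothetical helper name:

// resumeHeight mirrors the logic above: take the max of the synced heights,
// then let a configured StartHeight above them win. Fetching begins at the
// returned value + 1. The helper and its parameter names are illustrative.
func resumeHeight(messageH, batchH, bridgeBatchDepositH, cfgStartHeight uint64) uint64 {
	h := messageH
	if batchH > h {
		h = batchH
	}
	if bridgeBatchDepositH > h {
		h = bridgeBatchDepositH
	}
	if cfgStartHeight > h {
		h = cfgStartHeight - 1
	}
	return h
}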

View File

@@ -64,13 +64,17 @@ func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
// Start starts the L2 message fetching process.
func (c *L2MessageFetcher) Start() {
l2SentMessageSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
if dbErr != nil {
log.Crit("failed to get L2 cross message processed height", "err", dbErr)
return
}
l2SyncHeight := l2SentMessageSyncedHeight
if l2BridgeBatchDepositSyncedHeight > l2SyncHeight {
l2SyncHeight = l2BridgeBatchDepositSyncedHeight
}
// Sync from an older block to prevent reorg during restart.
if l2SyncHeight < logic.L2ReorgSafeDepth {
l2SyncHeight = 0
@@ -86,7 +90,8 @@ func (c *L2MessageFetcher) Start() {
c.updateL2SyncHeight(l2SyncHeight, header.Hash())
log.Info("Start L2 message fetcher", "message synced height", l2SentMessageSyncedHeight, "sync start height", l2SyncHeight+1)
log.Info("Start L2 message fetcher", "l2 sent message synced height", l2SentMessageSyncedHeight,
"bridge batch deposit synced height", l2BridgeBatchDepositSyncedHeight, "sync start height", l2SyncHeight+1)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
@@ -141,6 +146,11 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
return
}
if updateErr := c.eventUpdateLogic.UpdateL2BridgeBatchDepositEvent(c.ctx, l2FetcherResult.BridgeBatchDepositMessage); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}
c.updateL2SyncHeight(to, lastBlockHash)
c.l2MessageFetcherRunningTotal.Inc()
}

View File

@@ -11,14 +11,16 @@ import (
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// EventUpdateLogic contains the logic for inserting/updating the database
type EventUpdateLogic struct {
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
bridgeBatchDepositEventOrm *orm.BridgeBatchDepositEvent
eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight prometheus.Gauge
eventUpdateLogicL2MessageNonceUpdateHeight prometheus.Gauge
@@ -27,9 +29,10 @@ type EventUpdateLogic struct {
// NewEventUpdateLogic creates an EventUpdateLogic instance
func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
b := &EventUpdateLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
bridgeBatchDepositEventOrm: orm.NewBridgeBatchDepositEvent(db),
}
if !isL1 {
@@ -48,30 +51,42 @@ func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
}
// GetL1SyncHeight gets the L1 sync heights (message, batch and bridge batch deposit) from the DB
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, error) {
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL1SentMessage)
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, uint64, error) {
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL1SentMessage)
if err != nil {
log.Error("failed to get L1 cross message synced height", "error", err)
return 0, 0, err
return 0, 0, 0, err
}
batchSyncedHeight, err := b.batchEventOrm.GetBatchEventSyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get L1 batch event synced height", "error", err)
return 0, 0, err
return 0, 0, 0, err
}
return messageSyncedHeight, batchSyncedHeight, nil
bridgeBatchDepositSyncedHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL1SyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get l1 bridge batch deposit synced height", "error", err)
return 0, 0, 0, err
}
return messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, nil
}
// GetL2MessageSyncedHeightInDB gets the L2 sent message and bridge batch deposit synced heights
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, error) {
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL2SentMessage)
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, uint64, error) {
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL2SentMessage)
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, err
return 0, 0, err
}
return l2SentMessageSyncedHeight, nil
l2BridgeBatchDepositSyncHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL2SyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get bridge batch deposit processed height", "err", err)
return 0, 0, err
}
return l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncHeight, nil
}
// L1InsertOrUpdate inserts or updates l1 messages
@@ -100,6 +115,12 @@ func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult
log.Error("failed to insert failed L1 gateway transactions", "err", err)
return err
}
if err := b.bridgeBatchDepositEventOrm.InsertOrUpdateL1BridgeBatchDepositEvent(ctx, l1FetcherResult.BridgeBatchDepositEvents); err != nil {
log.Error("failed to insert L1 bridge batch deposit transactions", "err", err)
return err
}
return nil
}
@@ -139,7 +160,7 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
for i, message := range l2WithdrawMessages {
message.MerkleProof = proofs[i]
message.RollupStatus = int(orm.RollupStatusTypeFinalized)
message.RollupStatus = int(btypes.RollupStatusTypeFinalized)
message.BatchIndex = batchIndex
}
@@ -175,6 +196,30 @@ func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, heig
return nil
}
// UpdateL2BridgeBatchDepositEvent updates the L2 bridge batch deposit status
func (b *EventUpdateLogic) UpdateL2BridgeBatchDepositEvent(ctx context.Context, l2BatchDistributes []*orm.BridgeBatchDepositEvent) error {
distributeFailedMap := make(map[uint64][]string)
for _, l2BatchDistribute := range l2BatchDistributes {
if btypes.TxStatusType(l2BatchDistribute.TxStatus) == btypes.TxStatusBridgeBatchDistributeFailed {
distributeFailedMap[l2BatchDistribute.BatchIndex] = append(distributeFailedMap[l2BatchDistribute.BatchIndex], l2BatchDistribute.Sender)
}
if err := b.bridgeBatchDepositEventOrm.UpdateBatchEventStatus(ctx, l2BatchDistribute); err != nil {
log.Error("failed to update L1 bridge batch distribute event", "batchIndex", l2BatchDistribute.BatchIndex, "err", err)
return err
}
}
for batchIndex, distributeFailedSenders := range distributeFailedMap {
if err := b.bridgeBatchDepositEventOrm.UpdateDistributeFailedStatus(ctx, batchIndex, distributeFailedSenders); err != nil {
log.Error("failed to update L1 bridge batch distribute failed event", "batchIndex", batchIndex, "failed senders", distributeFailedSenders, "err", err)
return err
}
}
return nil
}
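
Seen in isolation, the first loop above simply buckets failed-distribute senders by batch index before any per-batch update is issued. A self-contained sketch of that grouping step (the struct and sample data are illustrative, not the real ORM model):

package main

import "fmt"

type distribute struct {
	BatchIndex uint64
	Sender     string
	Failed     bool
}

func main() {
	events := []distribute{
		{BatchIndex: 7, Sender: "0xaaa", Failed: true},
		{BatchIndex: 7, Sender: "0xbbb", Failed: true},
		{BatchIndex: 8, Sender: "0xccc", Failed: false},
	}
	// Group failed senders by batch index, mirroring distributeFailedMap above.
	failedByBatch := make(map[uint64][]string)
	for _, e := range events {
		if e.Failed {
			failedByBatch[e.BatchIndex] = append(failedByBatch[e.BatchIndex], e.Sender)
		}
	}
	fmt.Println(failedByBatch) // map[7:[0xaaa 0xbbb]]
}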
// L2InsertOrUpdate inserts or updates L2 messages
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
if err := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages); err != nil {

View File

@@ -16,6 +16,7 @@ import (
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/types"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -35,20 +36,23 @@ const (
// HistoryLogic services.
type HistoryLogic struct {
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
redis *redis.Client
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
bridgeBatchDepositOrm *orm.BridgeBatchDepositEvent
redis *redis.Client
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
}
// NewHistoryLogic returns bridge history services.
func NewHistoryLogic(db *gorm.DB, redis *redis.Client) *HistoryLogic {
logic := &HistoryLogic{
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
redis: redis,
cacheMetrics: initCacheMetrics(),
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
bridgeBatchDepositOrm: orm.NewBridgeBatchDepositEvent(db),
redis: redis,
cacheMetrics: initCacheMetrics(),
}
return logic
}
@@ -72,25 +76,28 @@ func (h *HistoryLogic) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, a
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
if err != nil {
return nil, err
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
return messages, nil
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
return txHistoryInfos, nil
})
if err != nil {
log.Error("failed to get L2 claimable withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
messages, ok := result.([]*orm.CrossMessage)
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
}
// GetL2WithdrawalsByAddress gets all withdrawal txs under given address.
@@ -112,25 +119,28 @@ func (h *HistoryLogic) GetL2WithdrawalsByAddress(ctx context.Context, address st
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
if err != nil {
return nil, err
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
return messages, nil
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
return txHistoryInfos, nil
})
if err != nil {
log.Error("failed to get L2 withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
messages, ok := result.([]*orm.CrossMessage)
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
}
// GetTxsByAddress gets tx infos under given address.
@@ -152,25 +162,36 @@ func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address string, page
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetTxsByAddress(ctx, address)
if err != nil {
return nil, err
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetTxsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
return messages, nil
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
batchDepositMessages, getErr := h.bridgeBatchDepositOrm.GetTxsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
for _, message := range batchDepositMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
}
return txHistoryInfos, nil
})
if err != nil {
log.Error("failed to get txs by address", "address", address, "error", err)
return nil, 0, err
}
messages, ok := result.([]*orm.CrossMessage)
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
}
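
All three address queries above share the same shape: check the Redis cache, then call singleflight.Do so that concurrent requests for the same cache key trigger only one database read while the other callers wait for and share that result. A minimal, standalone illustration of that de-duplication (the key and the fetch function are placeholders):

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	slowFetch := func() (interface{}, error) {
		time.Sleep(50 * time.Millisecond) // stand-in for a DB query
		return "result", nil
	}

	// Ten concurrent callers with the same key trigger only one slowFetch;
	// the rest receive the shared result.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _, shared := g.Do("txs:0xabc", slowFetch)
			fmt.Println(v, shared)
		}()
	}
	wg.Wait()
}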
// GetTxsByHashes gets tx infos under given tx hashes.
@@ -218,15 +239,24 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
}
if len(uncachedHashes) > 0 {
messages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
var txHistories []*types.TxHistoryInfo
crossMessages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get messages by tx hashes", "hashes", uncachedHashes)
log.Error("failed to get cross messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
for _, message := range crossMessages {
txHistories = append(txHistories, getTxHistoryInfoFromCrossMessage(message))
}
var txHistories []*types.TxHistoryInfo
for _, message := range messages {
txHistories = append(txHistories, getTxHistoryInfo(message))
batchDepositMessages, err := h.bridgeBatchDepositOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get batch deposit messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
for _, message := range batchDepositMessages {
txHistories = append(txHistories, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
}
resultMap := make(map[string]*types.TxHistoryInfo)
@@ -260,19 +290,19 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
return results, nil
}
func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
MessageHash: message.MessageHash,
TokenType: orm.TokenType(message.TokenType),
TokenType: btypes.TokenType(message.TokenType),
TokenIDs: utils.ConvertStringToStringArray(message.TokenIDs),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmounts),
L1TokenAddress: message.L1TokenAddress,
L2TokenAddress: message.L2TokenAddress,
MessageType: orm.MessageType(message.MessageType),
TxStatus: orm.TxStatusType(message.TxStatus),
MessageType: btypes.MessageType(message.MessageType),
TxStatus: btypes.TxStatusType(message.TxStatus),
BlockTimestamp: message.BlockTimestamp,
}
if txHistory.MessageType == orm.MessageTypeL1SentMessage {
if txHistory.MessageType == btypes.MessageTypeL1SentMessage {
txHistory.Hash = message.L1TxHash
txHistory.ReplayTxHash = message.L1ReplayTxHash
txHistory.RefundTxHash = message.L1RefundTxHash
@@ -288,7 +318,7 @@ func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
Hash: message.L1TxHash,
BlockNumber: message.L1BlockNumber,
}
if orm.RollupStatusType(message.RollupStatus) == orm.RollupStatusTypeFinalized {
if btypes.RollupStatusType(message.RollupStatus) == btypes.RollupStatusTypeFinalized {
txHistory.ClaimInfo = &types.ClaimInfo{
From: message.MessageFrom,
To: message.MessageTo,
@@ -306,6 +336,28 @@ func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
return txHistory
}
func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepositEvent) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
Hash: message.L1TxHash,
TokenType: btypes.TokenType(message.TokenType),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmount),
BlockNumber: message.L1BlockNumber,
MessageType: btypes.MessageTypeL1BatchDeposit,
TxStatus: btypes.TxStatusType(message.TxStatus),
CounterpartChainTx: &types.CounterpartChainTx{
Hash: message.L2TxHash,
BlockNumber: message.L2BlockNumber,
},
BlockTimestamp: message.BlockTimestamp,
BatchDepositFee: message.Fee,
}
if txHistory.TokenType != btypes.TokenTypeETH {
txHistory.L1TokenAddress = message.L1TokenAddress
txHistory.L2TokenAddress = message.L2TokenAddress
}
return txHistory
}
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
start := int64((pageNum - 1) * pageSize)
end := start + int64(pageSize) - 1
@@ -320,7 +372,7 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
return nil, 0, false, nil
}
values, err := h.redis.ZRange(ctx, cacheKey, start, end).Result()
values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
if err != nil {
log.Error("failed to get zrange result", "error", err)
return nil, 0, false, err
@@ -356,13 +408,13 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
}
} else {
// The transactions are scored by their block timestamps, so they can be read back in reverse chronological order.
for i, tx := range txs {
for _, tx := range txs {
txBytes, err := json.Marshal(tx)
if err != nil {
log.Error("failed to marshal transaction to json", "error", err)
return err
}
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(i), Member: txBytes}).Err(); err != nil {
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(tx.BlockTimestamp), Member: txBytes}).Err(); err != nil {
log.Error("failed to add transaction to sorted set", "error", err)
return err
}
@@ -381,12 +433,7 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
return nil
}
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, messages []*orm.CrossMessage, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
for _, message := range messages {
txHistories = append(txHistories, getTxHistoryInfo(message))
}
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, txHistories []*types.TxHistoryInfo, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
err := h.cacheTxsInfo(ctx, cacheKey, txHistories)
if err != nil {
log.Error("failed to cache txs info", "key", cacheKey, "err", err)

View File

@@ -13,6 +13,7 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -30,8 +31,60 @@ func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1Ev
}
}
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
// ParseL1CrossChainEventLogs parses L1 cross chain event logs
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
l1CrossChainDepositMessages, l1CrossChainRelayedMessages, err := e.ParseL1SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
l1BridgeBatchDepositMessages, err := e.ParseL1BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
return l1CrossChainDepositMessages, l1CrossChainRelayedMessages, l1BridgeBatchDepositMessages, nil
}
// ParseL1BridgeBatchDepositCrossChainEventLogs parses L1 watched batch bridge cross chain events.
func (e *L1EventParser) ParseL1BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
var l1BridgeBatchDepositMessages []*orm.BridgeBatchDepositEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1BridgeBatchDepositSig:
event := backendabi.L1BatchBridgeGatewayDeposit{}
if err := utils.UnpackLog(backendabi.L1BatchBridgeGatewayABI, &event, "Deposit", vlog); err != nil {
log.Error("Failed to unpack batch bridge gateway deposit event", "err", err)
return nil, err
}
var tokenType btypes.TokenType
if event.Token == common.HexToAddress("0") {
tokenType = btypes.TokenTypeETH
} else {
tokenType = btypes.TokenTypeERC20
}
l1BridgeBatchDepositMessages = append(l1BridgeBatchDepositMessages, &orm.BridgeBatchDepositEvent{
TokenType: int(tokenType),
Sender: event.Sender.String(),
BatchIndex: event.BatchIndex.Uint64(),
TokenAmount: event.Amount.String(),
Fee: event.Fee.String(),
L1TokenAddress: event.Token.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDeposit),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L1LogIndex: vlog.Index,
})
}
}
return l1BridgeBatchDepositMessages, nil
}
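
The ETH vs ERC20 split above relies on the batch bridge gateway emitting the zero address as the token for ETH deposits. Assuming the scroll-tech go-ethereum fork's common package behaves like upstream, the check reduces to a zero-address comparison:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

func main() {
	// HexToAddress("0") parses to the zero address, so the comparison in
	// ParseL1BridgeBatchDepositCrossChainEventLogs is a zero-address test.
	zero := common.HexToAddress("0")
	fmt.Println(zero == (common.Address{}))            // true  -> ETH deposit
	fmt.Println(common.HexToAddress("0xdead") == zero) // false -> ERC20 deposit
}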
// ParseL1SingleCrossChainEventLogs parses L1 watched single cross chain events.
func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l1DepositMessages []*orm.CrossMessage
var l1RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
@@ -45,7 +98,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeETH)
lastMessage.TokenType = int(btypes.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
@@ -57,7 +110,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC20)
lastMessage.TokenType = int(btypes.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
@@ -70,7 +123,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -83,7 +136,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -96,7 +149,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -110,7 +163,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -130,12 +183,12 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
L1BlockNumber: vlog.BlockNumber,
Sender: from,
Receiver: event.Target.String(),
TokenType: int(orm.TokenTypeETH),
TokenType: int(btypes.TokenTypeETH),
L1TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageType: int(orm.MessageTypeL1SentMessage),
TxStatus: int(orm.TxStatusTypeSent),
MessageType: int(btypes.MessageTypeL1SentMessage),
TxStatus: int(btypes.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
})
@@ -149,8 +202,8 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(orm.TxStatusTypeRelayed),
MessageType: int(orm.MessageTypeL2SentMessage),
TxStatus: int(btypes.TxStatusTypeRelayed),
MessageType: int(btypes.MessageTypeL2SentMessage),
})
case backendabi.L1FailedRelayedMessageEventSig:
event := backendabi.L1FailedRelayedMessageEvent{}
@@ -162,8 +215,8 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(orm.TxStatusTypeFailedRelayed),
MessageType: int(orm.MessageTypeL2SentMessage),
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
MessageType: int(btypes.MessageTypeL2SentMessage),
})
}
}
@@ -192,7 +245,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(orm.BatchStatusTypeCommitted),
BatchStatus: int(btypes.BatchStatusTypeCommitted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
StartBlockNumber: startBlock,
@@ -206,7 +259,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(orm.BatchStatusTypeReverted),
BatchStatus: int(btypes.BatchStatusTypeReverted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
@@ -218,7 +271,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(orm.BatchStatusTypeFinalized),
BatchStatus: int(btypes.BatchStatusTypeFinalized),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
@@ -248,7 +301,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
// If the message hash is not found in the map, it's not a replayMessage or enforced tx (omitted); add it to the events.
if _, exists := messageHashes[messageHash]; !exists {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: orm.MessageQueueEventTypeQueueTransaction,
EventType: btypes.MessageQueueEventTypeQueueTransaction,
QueueIndex: event.QueueIndex,
MessageHash: messageHash,
TxHash: vlog.TxHash,
@@ -263,7 +316,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
for _, index := range skippedIndices {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: orm.MessageQueueEventTypeDequeueTransaction,
EventType: btypes.MessageQueueEventTypeDequeueTransaction,
QueueIndex: index,
})
}
@@ -274,7 +327,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: orm.MessageQueueEventTypeDropTransaction,
EventType: btypes.MessageQueueEventTypeDropTransaction,
QueueIndex: event.Index.Uint64(),
TxHash: vlog.TxHash,
})

View File

@@ -16,6 +16,7 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -25,11 +26,12 @@ const L1ReorgSafeDepth = 64
// L1FilterResult L1 fetcher result
type L1FilterResult struct {
DepositMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage
BatchEvents []*orm.BatchEvent
MessageQueueEvents []*orm.MessageQueueEvent
RevertedTxs []*orm.CrossMessage
DepositMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage
BatchEvents []*orm.BatchEvent
MessageQueueEvents []*orm.MessageQueueEvent
RevertedTxs []*orm.CrossMessage
BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
}
// L1FetcherLogic the L1 fetcher logic
@@ -82,7 +84,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional erc20 gateways.
// Optional gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
@@ -98,6 +100,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L1FetcherLogic{
@@ -183,12 +190,12 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
L1TxHash: tx.Hash().String(),
MessageType: int(orm.MessageTypeL1SentMessage),
MessageType: int(btypes.MessageTypeL1SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L1BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
})
}
}
@@ -203,7 +210,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 13)
query.Topics[0] = make([]common.Hash, 14)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1DepositERC721Sig
@@ -217,6 +224,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
query.Topics[0][13] = backendabi.L1BridgeBatchDepositSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
@@ -252,7 +260,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
l1DepositMessages, l1RelayedMessages, l1BridgeBatchDepositMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
@@ -271,11 +279,12 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
}
res := L1FilterResult{
DepositMessages: l1DepositMessages,
RelayedMessages: l1RelayedMessages,
BatchEvents: l1BatchEvents,
MessageQueueEvents: l1MessageQueueEvents,
RevertedTxs: l1RevertedTxs,
DepositMessages: l1DepositMessages,
RelayedMessages: l1RelayedMessages,
BatchEvents: l1BatchEvents,
MessageQueueEvents: l1MessageQueueEvents,
RevertedTxs: l1RevertedTxs,
BridgeBatchDepositEvents: l1BridgeBatchDepositMessages,
}
f.updateMetrics(res)
@@ -287,23 +296,23 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_gateway_router_transaction").Add(float64(len(res.RevertedTxs)))
for _, depositMessage := range res.DepositMessages {
switch orm.TokenType(depositMessage.TokenType) {
case orm.TokenTypeETH:
switch btypes.TokenType(depositMessage.TokenType) {
case btypes.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_eth").Add(1)
case orm.TokenTypeERC20:
case btypes.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc20").Add(1)
case orm.TokenTypeERC721:
case btypes.TokenTypeERC721:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc721").Add(1)
case orm.TokenTypeERC1155:
case btypes.TokenTypeERC1155:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch orm.TxStatusType(relayedMessage.TxStatus) {
case orm.TxStatusTypeRelayed:
switch btypes.TxStatusType(relayedMessage.TxStatus) {
case btypes.TxStatusTypeRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_relayed_message").Add(1)
case orm.TxStatusTypeFailedRelayed:
case btypes.TxStatusTypeFailedRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_relayed_message").Add(1)
}
// Have not tracked L1 relayed message reverted transaction yet.
@@ -312,24 +321,33 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
}
for _, batchEvent := range res.BatchEvents {
switch orm.BatchStatusType(batchEvent.BatchStatus) {
case orm.BatchStatusTypeCommitted:
switch btypes.BatchStatusType(batchEvent.BatchStatus) {
case btypes.BatchStatusTypeCommitted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_commit_batch_event").Add(1)
case orm.BatchStatusTypeReverted:
case btypes.BatchStatusTypeReverted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_revert_batch_event").Add(1)
case orm.BatchStatusTypeFinalized:
case btypes.BatchStatusTypeFinalized:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_finalize_batch_event").Add(1)
}
}
for _, messageQueueEvent := range res.MessageQueueEvents {
switch messageQueueEvent.EventType {
case orm.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
case btypes.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_replay_message_or_enforced_transaction").Add(1)
case orm.MessageQueueEventTypeDequeueTransaction:
case btypes.MessageQueueEventTypeDequeueTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
case orm.MessageQueueEventTypeDropTransaction:
case btypes.MessageQueueEventTypeDropTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
}
}
for _, bridgeBatchDepositEvent := range res.BridgeBatchDepositEvents {
switch btypes.TokenType(bridgeBatchDepositEvent.TokenType) {
case btypes.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_eth").Add(1)
case btypes.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_erc20").Add(1)
}
}
}
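
For reference, the l1FetcherLogs query built earlier in this file puts all event signatures into Topics[0]; eth_getLogs treats entries in the same topic position as an OR filter, so a single FilterLogs call returns logs matching any of the 14 signatures. A minimal sketch of that pattern (the signatures and addresses are placeholders, not the real ABI constants):

package logicsketch

import (
	"math/big"

	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// buildQuery is an illustrative helper: each entry in Topics[0] is OR-ed,
// so a log matches if its event signature equals any of them.
func buildQuery(from, to uint64, addrs []common.Address) ethereum.FilterQuery {
	sigs := []common.Hash{
		crypto.Keccak256Hash([]byte("DepositETH(address,address,uint256,bytes)")), // placeholder signature
		crypto.Keccak256Hash([]byte("Deposit(address,address,uint256,uint256,uint256)")),
	}
	return ethereum.FilterQuery{
		FromBlock: new(big.Int).SetUint64(from),
		ToBlock:   new(big.Int).SetUint64(to),
		Addresses: addrs,
		Topics:    [][]common.Hash{sigs},
	}
}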

View File

@@ -3,6 +3,7 @@ package logic
import (
"context"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
@@ -11,6 +12,7 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -28,8 +30,72 @@ func NewL2EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L2Ev
}
}
// ParseL2EventLogs parses L2 watched events
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
// ParseL2EventLogs parses L2 watched events
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
l2WithdrawMessages, l2RelayedMessages, err := e.ParseL2SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
l2BridgeBatchDepositMessages, err := e.ParseL2BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
return l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, nil
}
// ParseL2BridgeBatchDepositCrossChainEventLogs parses L2 watched bridge batch deposit events
func (e *L2EventParser) ParseL2BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
var l2BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2BridgeBatchDistributeSig:
event := backendabi.L2BatchBridgeGatewayBatchDistribute{}
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "BatchDistribute", vlog)
if err != nil {
log.Error("Failed to unpack BatchDistribute event", "err", err)
return nil, err
}
var tokenType btypes.TokenType
if event.L1Token == common.HexToAddress("0") {
tokenType = btypes.TokenTypeETH
} else {
tokenType = btypes.TokenTypeERC20
}
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
TokenType: int(tokenType),
BatchIndex: event.BatchIndex.Uint64(),
L2TokenAddress: event.L2Token.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDistribute),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
})
case backendabi.L2BridgeBatchDistributeFailedSig:
event := backendabi.L2BatchBridgeGatewayDistributeFailed{}
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "DistributeFailed", vlog)
if err != nil {
log.Error("Failed to unpack DistributeFailed event", "err", err)
return nil, err
}
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
BatchIndex: event.BatchIndex.Uint64(),
L2TokenAddress: event.L2Token.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDistributeFailed),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
Sender: event.Receiver.String(),
})
}
}
return l2BridgeBatchDepositEvents, nil
}
// ParseL2SingleCrossChainEventLogs parses L2 watched events
func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l2WithdrawMessages []*orm.CrossMessage
var l2RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
@@ -44,7 +110,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeETH)
lastMessage.TokenType = int(btypes.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
@@ -56,7 +122,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC20)
lastMessage.TokenType = int(btypes.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
@@ -70,7 +136,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -84,7 +150,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -98,7 +164,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -113,7 +179,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -134,7 +200,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
Sender: from,
Receiver: event.Target.String(),
TokenType: int(orm.TokenTypeETH),
TokenType: int(btypes.TokenTypeETH),
L2TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageFrom: event.Sender.String(),
@@ -142,8 +208,8 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
MessageValue: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageData: hexutil.Encode(event.Message),
MessageType: int(orm.MessageTypeL2SentMessage),
TxStatus: int(orm.TxStatusTypeSent),
MessageType: int(btypes.MessageTypeL2SentMessage),
TxStatus: int(btypes.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L2BlockNumber: vlog.BlockNumber,
})
@@ -158,8 +224,8 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(orm.TxStatusTypeRelayed),
MessageType: int(orm.MessageTypeL1SentMessage),
TxStatus: int(btypes.TxStatusTypeRelayed),
MessageType: int(btypes.MessageTypeL1SentMessage),
})
case backendabi.L2FailedRelayedMessageEventSig:
event := backendabi.L2RelayedMessageEvent{}
@@ -172,8 +238,8 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(orm.TxStatusTypeFailedRelayed),
MessageType: int(orm.MessageTypeL1SentMessage),
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
MessageType: int(btypes.MessageTypeL1SentMessage),
})
}
}

View File

@@ -17,6 +17,7 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -26,9 +27,10 @@ const L2ReorgSafeDepth = 256
// L2FilterResult the L2 filter result
type L2FilterResult struct {
WithdrawMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
WithdrawMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
BridgeBatchDepositMessage []*orm.BridgeBatchDepositEvent
}
// L2FetcherLogic the L2 fetcher logic
@@ -77,7 +79,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional erc20 gateways.
// Optional gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
@@ -93,6 +95,11 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L2FetcherLogic{
@@ -164,9 +171,9 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
l2RevertedRelayedMessageTxs = append(l2RevertedRelayedMessageTxs, &orm.CrossMessage{
MessageHash: common.BytesToHash(crypto.Keccak256(tx.AsL1MessageTx().Data)).String(),
L2TxHash: tx.Hash().String(),
TxStatus: int(orm.TxStatusTypeRelayTxReverted),
TxStatus: int(btypes.TxStatusTypeRelayTxReverted),
L2BlockNumber: receipt.BlockNumber.Uint64(),
MessageType: int(orm.MessageTypeL1SentMessage),
MessageType: int(btypes.MessageTypeL1SentMessage),
})
}
continue
@@ -194,12 +201,12 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
L2TxHash: tx.Hash().String(),
MessageType: int(orm.MessageTypeL2SentMessage),
MessageType: int(btypes.MessageTypeL2SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L2BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
})
}
}
@@ -214,7 +221,7 @@ func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]
Addresses: f.addressList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0] = make([]common.Hash, 9)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2WithdrawERC721Sig
@@ -222,6 +229,8 @@ func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][4] = backendabi.L2SentMessageEventSig
query.Topics[0][5] = backendabi.L2RelayedMessageEventSig
query.Topics[0][6] = backendabi.L2FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L2BridgeBatchDistributeSig
query.Topics[0][8] = backendabi.L2BridgeBatchDistributeFailedSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
@@ -257,16 +266,17 @@ func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
res := L2FilterResult{
WithdrawMessages: l2WithdrawMessages,
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
OtherRevertedTxs: revertedUserTxs,
WithdrawMessages: l2WithdrawMessages,
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
OtherRevertedTxs: revertedUserTxs,
BridgeBatchDepositMessage: l2BridgeBatchDepositMessages,
}
f.updateMetrics(res)
@@ -278,28 +288,37 @@ func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_gateway_router_transaction").Add(float64(len(res.OtherRevertedTxs)))
for _, withdrawMessage := range res.WithdrawMessages {
switch orm.TokenType(withdrawMessage.TokenType) {
case orm.TokenTypeETH:
switch btypes.TokenType(withdrawMessage.TokenType) {
case btypes.TokenTypeETH:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_eth").Add(1)
case orm.TokenTypeERC20:
case btypes.TokenTypeERC20:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc20").Add(1)
case orm.TokenTypeERC721:
case btypes.TokenTypeERC721:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc721").Add(1)
case orm.TokenTypeERC1155:
case btypes.TokenTypeERC1155:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch orm.TxStatusType(relayedMessage.TxStatus) {
case orm.TxStatusTypeRelayed:
switch btypes.TxStatusType(relayedMessage.TxStatus) {
case btypes.TxStatusTypeRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_relayed_message").Add(1)
case orm.TxStatusTypeFailedRelayed:
case btypes.TxStatusTypeFailedRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_relayed_message").Add(1)
case orm.TxStatusTypeRelayTxReverted:
case btypes.TxStatusTypeRelayTxReverted:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_reverted_relayed_message_transaction").Add(1)
}
}
for _, bridgeBatchDepositMessage := range res.BridgeBatchDepositMessage {
switch btypes.TxStatusType(bridgeBatchDepositMessage.TxStatus) {
case btypes.TxStatusBridgeBatchDistribute:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_message").Add(1)
case btypes.TxStatusBridgeBatchDistributeFailed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_failed_message").Add(1)
}
}
}
func isTransactionToGateway(tx *types.Transaction, gatewayList []common.Address) bool {

View File

@@ -7,26 +7,8 @@ import (
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// BatchStatusType represents the type of batch status.
type BatchStatusType int
// Constants for BatchStatusType.
const (
BatchStatusTypeUnknown BatchStatusType = iota
BatchStatusTypeCommitted
BatchStatusTypeReverted
BatchStatusTypeFinalized
)
// UpdateStatusType represents the whether batch info is updated in message table.
type UpdateStatusType int
// Constants for UpdateStatusType.
const (
UpdateStatusTypeUnupdated UpdateStatusType = iota
UpdateStatusTypeUpdated
btypes "scroll-tech/bridge-history-api/internal/types"
)
// BatchEvent represents a batch event.
@@ -77,8 +59,8 @@ func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, block
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("end_block_number <= ?", blockHeight)
db = db.Where("batch_status = ?", BatchStatusTypeFinalized)
db = db.Where("update_status = ?", UpdateStatusTypeUnupdated)
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
db = db.Order("batch_index asc")
if err := db.Find(&batches).Error; err != nil {
if err == gorm.ErrRecordNotFound {
@@ -96,8 +78,8 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
db = db.WithContext(ctx)
db = db.Model(&BatchEvent{})
updateFields := make(map[string]interface{})
switch BatchStatusType(l1BatchEvent.BatchStatus) {
case BatchStatusTypeCommitted:
switch btypes.BatchStatusType(l1BatchEvent.BatchStatus) {
case btypes.BatchStatusTypeCommitted:
// Use the clause to either insert or ignore on conflict
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "batch_hash"}},
@@ -106,17 +88,17 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
if err := db.Create(l1BatchEvent).Error; err != nil {
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
}
case BatchStatusTypeFinalized:
case btypes.BatchStatusTypeFinalized:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = BatchStatusTypeFinalized
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
case BatchStatusTypeReverted:
case btypes.BatchStatusTypeReverted:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = BatchStatusTypeReverted
updateFields["batch_status"] = btypes.BatchStatusTypeReverted
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
@@ -135,7 +117,7 @@ func (c *BatchEvent) UpdateBatchEventStatus(ctx context.Context, batchIndex uint
db = db.Model(&BatchEvent{})
db = db.Where("batch_index = ?", batchIndex)
updateFields := map[string]interface{}{
"update_status": UpdateStatusTypeUpdated,
"update_status": btypes.UpdateStatusTypeUpdated,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event status, batchIndex: %d, error: %w", batchIndex, err)

View File

@@ -0,0 +1,163 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/bridge-history-api/internal/types"
)
// BridgeBatchDepositEvent represents the bridge batch deposit event.
type BridgeBatchDepositEvent struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id;primary_key"`
TokenType int `json:"token_type" gorm:"column:token_type"`
Sender string `json:"sender" gorm:"column:sender"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
TokenAmount string `json:"token_amount" gorm:"column:token_amount"`
Fee string `json:"fee" gorm:"column:fee"`
L1TokenAddress string `json:"l1_token_address" gorm:"column:l1_token_address"`
L2TokenAddress string `json:"l2_token_address" gorm:"column:l2_token_address"`
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
L2BlockNumber uint64 `json:"l2_block_number" gorm:"column:l2_block_number"`
L1TxHash string `json:"l1_tx_hash" gorm:"column:l1_tx_hash"`
L1LogIndex uint `json:"l1_log_index" gorm:"column:l1_log_index"`
L2TxHash string `json:"l2_tx_hash" gorm:"column:l2_tx_hash"`
TxStatus int `json:"tx_status" gorm:"column:tx_status"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"column:block_timestamp"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the BridgeBatchDepositEvent model.
func (*BridgeBatchDepositEvent) TableName() string {
return "bridge_batch_deposit_event_v2"
}
// NewBridgeBatchDepositEvent returns a new instance of BridgeBatchDepositEvent.
func NewBridgeBatchDepositEvent(db *gorm.DB) *BridgeBatchDepositEvent {
return &BridgeBatchDepositEvent{db: db}
}
// GetTxsByAddress returns the txs by address
func (c *BridgeBatchDepositEvent) GetTxsByAddress(ctx context.Context, sender string) ([]*BridgeBatchDepositEvent, error) {
var messages []*BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get all txs by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// GetMessagesByTxHashes retrieves all BridgeBatchDepositEvent from the database that match the provided transaction hashes.
func (c *BridgeBatchDepositEvent) GetMessagesByTxHashes(ctx context.Context, txHashes []string) ([]*BridgeBatchDepositEvent, error) {
var messages []*BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("l1_tx_hash in (?) or l2_tx_hash in (?)", txHashes, txHashes)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to GetMessagesByTxHashes by tx hashes, tx hashes: %v, error: %w", txHashes, err)
}
return messages, nil
}
// GetMessageL1SyncedHeightInDB returns the latest L1 bridge batch deposit message height synced in the database
func (c *BridgeBatchDepositEvent) GetMessageL1SyncedHeightInDB(ctx context.Context) (uint64, error) {
var message BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Order("l1_block_number desc")
err := db.First(&message).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to get l1 latest processed height, error: %w", err)
}
return message.L1BlockNumber, nil
}
// GetMessageL2SyncedHeightInDB returns the latest L2 bridge batch deposit message height synced in the database
func (c *BridgeBatchDepositEvent) GetMessageL2SyncedHeightInDB(ctx context.Context) (uint64, error) {
var message BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Order("l2_block_number desc")
err := db.First(&message).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to get l2 latest processed height, error: %w", err)
}
return message.L2BlockNumber, nil
}
// InsertOrUpdateL1BridgeBatchDepositEvent inserts or updates a new L1 BridgeBatchDepositEvent
func (c *BridgeBatchDepositEvent) InsertOrUpdateL1BridgeBatchDepositEvent(ctx context.Context, l1BatchDepositEvents []*BridgeBatchDepositEvent) error {
if len(l1BatchDepositEvents) == 0 {
return nil
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "l1_tx_hash"}, {Name: "l1_log_index"}},
DoUpdates: clause.AssignmentColumns([]string{"token_amount", "fee", "l1_block_number", "l1_token_address", "tx_status", "block_timestamp"}),
})
if err := db.Create(l1BatchDepositEvents).Error; err != nil {
return fmt.Errorf("failed to insert message, error: %w", err)
}
return nil
}
// UpdateBatchEventStatus updates the tx_status of BridgeBatchDepositEvent given batch index
func (c *BridgeBatchDepositEvent) UpdateBatchEventStatus(ctx context.Context, distributeMessage *BridgeBatchDepositEvent) error {
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("batch_index = ?", distributeMessage.BatchIndex)
db = db.Where("token_type = ?", distributeMessage.TokenType)
updateFields := map[string]interface{}{
"l2_token_address": distributeMessage.L2TokenAddress,
"l2_block_number": distributeMessage.L2BlockNumber,
"l2_tx_hash": distributeMessage.L2TxHash,
"tx_status": types.TxStatusBridgeBatchDistribute,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to UpdateBatchEventStatus, batchIndex: %d, error: %w", distributeMessage.BatchIndex, err)
}
return nil
}
// UpdateDistributeFailedStatus updates the tx_status of BridgeBatchDepositEvent given batch index and senders
func (c *BridgeBatchDepositEvent) UpdateDistributeFailedStatus(ctx context.Context, batchIndex uint64, senders []string) error {
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("batch_index = ?", batchIndex)
db = db.Where("sender in (?)", senders)
updateFields := map[string]interface{}{
"tx_status": types.TxStatusBridgeBatchDistributeFailed,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to UpdateDistributeFailedStatus, batchIndex: %d, senders:%v, error: %w", batchIndex, senders, err)
}
return nil
}

View File
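
To show how the new ORM above is meant to be consumed, here is a hedged sketch that wires it to a gorm handle, reads the resume height for the L1 fetcher, and lists one sender's batch-deposit history. The DSN and sender address are placeholders, and error handling is trimmed to the essentials.

package main

import (
	"context"
	"log"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"

	"scroll-tech/bridge-history-api/internal/orm"
)

func main() {
	// Placeholder DSN; the real service builds its connection from the config file.
	db, err := gorm.Open(postgres.Open("host=127.0.0.1 user=postgres dbname=bridge sslmode=disable"), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}
	eventOrm := orm.NewBridgeBatchDepositEvent(db)

	// Latest L1 height already ingested, used by the fetcher to resume syncing.
	height, err := eventOrm.GetMessageL1SyncedHeightInDB(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("resume L1 fetch from block %d", height)

	// Up to 500 most recent batch deposits for one sender, newest first.
	txs, err := eventOrm.GetTxsByAddress(context.Background(), "0x0000000000000000000000000000000000000001")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("sender has %d batch-deposit records", len(txs))
}
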

@@ -8,75 +8,15 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// TokenType represents the type of token.
type TokenType int
"scroll-tech/bridge-history-api/internal/types"
// Constants for TokenType.
const (
TokenTypeUnknown TokenType = iota
TokenTypeETH
TokenTypeERC20
TokenTypeERC721
TokenTypeERC1155
)
// MessageType represents the type of message.
type MessageType int
// Constants for MessageType.
const (
MessageTypeUnknown MessageType = iota
MessageTypeL1SentMessage
MessageTypeL2SentMessage
)
// TxStatusType represents the status of a transaction.
type TxStatusType int
// Constants for TxStatusType.
const (
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
// from a later status (e.g., relayed) back to "sent".
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // Not track message hash, thus will not be processed again anymore.
TxStatusTypeRelayed // Terminal status.
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeFailedRelayed
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
)
// RollupStatusType represents the status of a rollup.
type RollupStatusType int
// Constants for RollupStatusType.
const (
RollupStatusTypeUnknown RollupStatusType = iota
RollupStatusTypeFinalized // only batch finalized status is used.
)
// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int
// Constants for MessageQueueEventType.
const (
MessageQueueEventTypeUnknown MessageQueueEventType = iota
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
btypes "scroll-tech/bridge-history-api/internal/types"
)
// MessageQueueEvent struct represents the details of a message queue event.
type MessageQueueEvent struct {
EventType MessageQueueEventType
EventType btypes.MessageQueueEventType
QueueIndex uint64
// Track replay tx hash and refund tx hash.
@@ -132,15 +72,15 @@ func NewCrossMessage(db *gorm.DB) *CrossMessage {
}
// GetMessageSyncedHeightInDB returns the latest synced cross message height from the database for a given message type.
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType MessageType) (uint64, error) {
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType btypes.MessageType) (uint64, error) {
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", messageType)
switch {
case messageType == MessageTypeL1SentMessage:
case messageType == btypes.MessageTypeL1SentMessage:
db = db.Order("l1_block_number desc")
case messageType == MessageTypeL2SentMessage:
case messageType == btypes.MessageTypeL2SentMessage:
db = db.Order("l2_block_number desc")
}
if err := db.First(&message).Error; err != nil {
@@ -150,9 +90,9 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
}
switch {
case messageType == MessageTypeL1SentMessage:
case messageType == btypes.MessageTypeL1SentMessage:
return message.L1BlockNumber, nil
case messageType == MessageTypeL2SentMessage:
case messageType == btypes.MessageTypeL2SentMessage:
return message.L2BlockNumber, nil
default:
return 0, fmt.Errorf("invalid message type: %v", messageType)
@@ -164,8 +104,8 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("rollup_status = ?", RollupStatusTypeFinalized)
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
db = db.Order("message_nonce desc")
if err := db.First(&message).Error; err != nil {
if err == gorm.ErrRecordNotFound {
@@ -183,8 +123,8 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
db = db.Model(&CrossMessage{})
db = db.Where("l2_block_number >= ?", startBlock)
db = db.Where("l2_block_number <= ?", endBlock)
db = db.Where("tx_status != ?", TxStatusTypeSentTxReverted)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("tx_status != ?", types.TxStatusTypeSentTxReverted)
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Order("message_nonce asc")
if err := db.Find(&messages).Error; err != nil {
if err == gorm.ErrRecordNotFound {
@@ -212,8 +152,8 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", TxStatusTypeSent)
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
@@ -228,7 +168,7 @@ func (c *CrossMessage) GetL2WithdrawalsByAddress(ctx context.Context, sender str
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
@@ -261,22 +201,22 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Model(&CrossMessage{})
txStatusUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case MessageQueueEventTypeQueueTransaction:
case btypes.MessageQueueEventTypeQueueTransaction:
continue
case MessageQueueEventTypeDequeueTransaction:
case btypes.MessageQueueEventTypeDequeueTransaction:
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
case MessageQueueEventTypeDropTransaction:
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSkipped
case btypes.MessageQueueEventTypeDropTransaction:
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped
}
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
@@ -290,9 +230,9 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case MessageQueueEventTypeDequeueTransaction:
case btypes.MessageQueueEventTypeDequeueTransaction:
continue
case MessageQueueEventTypeQueueTransaction:
case btypes.MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs (whose message hashes would not be found); sendMessages have been filtered out.
// replayMessage case:
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
@@ -304,9 +244,9 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
case MessageQueueEventTypeDropTransaction:
case btypes.MessageQueueEventTypeDropTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
}
if err := db.Updates(txHashUpdateFields).Error; err != nil {
@@ -320,12 +260,12 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
func (c *CrossMessage) UpdateBatchStatusOfL2Withdrawals(ctx context.Context, startBlockNumber, endBlockNumber, batchIndex uint64) error {
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("l2_block_number >= ?", startBlockNumber)
db = db.Where("l2_block_number <= ?", endBlockNumber)
updateFields := make(map[string]interface{})
updateFields["batch_index"] = batchIndex
updateFields["rollup_status"] = RollupStatusTypeFinalized
updateFields["rollup_status"] = btypes.RollupStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch status of L2 sent messages, start: %v, end: %v, index: %v, error: %w", startBlockNumber, endBlockNumber, batchIndex, err)
}
@@ -462,7 +402,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
mergedL2RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l2RelayedMessages {
if existing, found := mergedL2RelayedMessages[message.MessageHash]; found {
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
mergedL2RelayedMessages[message.MessageHash] = message
}
} else {
@@ -489,8 +429,8 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
),
},
},
@@ -520,7 +460,7 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
mergedL1RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l1RelayedMessages {
if existing, found := mergedL1RelayedMessages[message.MessageHash]; found {
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
mergedL1RelayedMessages[message.MessageHash] = message
}
} else {
@@ -541,8 +481,8 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
),
},
},

View File
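
The upsert clauses above carry the core invariant of this file: relayed-message rows can arrive before or after their sent-message counterparts, so the ON CONFLICT update is guarded with a Where clause that refuses to overwrite terminal statuses (relayed, dropped). The following self-contained sketch reproduces that guard with a toy model and an in-memory sqlite driver purely for illustration; the real code runs the same clause against cross_message_v2 on Postgres.

package main

import (
	"log"

	"gorm.io/driver/sqlite" // toy driver for the sketch; the service itself runs on postgres
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

// toyMessage stands in for CrossMessage; only the fields needed for the sketch.
type toyMessage struct {
	MessageHash string `gorm:"primaryKey"`
	TxStatus    int
}

const (
	statusSent    = 0
	statusRelayed = 2 // terminal
	statusDropped = 6 // terminal
)

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}
	if err := db.AutoMigrate(&toyMessage{}); err != nil {
		log.Fatal(err)
	}
	// Seed a message that has already reached a terminal status.
	db.Create(&toyMessage{MessageHash: "0xabc", TxStatus: statusRelayed})

	// A late-arriving "sent" record must not roll the status back.
	late := &toyMessage{MessageHash: "0xabc", TxStatus: statusSent}
	db.Clauses(clause.OnConflict{
		Columns:   []clause.Column{{Name: "message_hash"}},
		DoUpdates: clause.AssignmentColumns([]string{"tx_status"}),
		Where: clause.Where{Exprs: []clause.Expression{clause.And(
			clause.Neq{Column: "toy_messages.tx_status", Value: statusRelayed},
			clause.Neq{Column: "toy_messages.tx_status", Value: statusDropped},
		)}},
	}).Create(late)

	var got toyMessage
	db.First(&got, "message_hash = ?", "0xabc")
	log.Printf("tx_status is still %d (relayed)", got.TxStatus) // the guard kept the terminal status
}
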

@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE bridge_batch_deposit_event_v2
(
id BIGSERIAL PRIMARY KEY,
token_type SMALLINT NOT NULL,
sender VARCHAR NOT NULL,
batch_index BIGINT DEFAULT NULL,
token_amount VARCHAR NOT NULL,
fee VARCHAR NOT NULL,
l1_token_address VARCHAR DEFAULT NULL,
l2_token_address VARCHAR DEFAULT NULL,
l1_block_number BIGINT DEFAULT NULL,
l2_block_number BIGINT DEFAULT NULL,
l1_tx_hash VARCHAR DEFAULT NULL,
l1_log_index INTEGER DEFAULT NULL,
l2_tx_hash VARCHAR DEFAULT NULL,
tx_status SMALLINT NOT NULL,
block_timestamp BIGINT NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX idx_l1hash_l1logindex ON bridge_batch_deposit_event_v2 (l1_tx_hash, l1_log_index);
CREATE INDEX IF NOT EXISTS idx_bbde_batchidx_sender ON bridge_batch_deposit_event_v2 (batch_index, sender);
CREATE INDEX IF NOT EXISTS idx_bbde_l1_block_number ON bridge_batch_deposit_event_v2 (l1_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l2_block_number ON bridge_batch_deposit_event_v2 (l2_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l1_tx_hash ON bridge_batch_deposit_event_v2 (l1_tx_hash DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l2_tx_hash ON bridge_batch_deposit_event_v2 (l2_tx_hash DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_sender_block_timestamp ON bridge_batch_deposit_event_v2 (sender, block_timestamp DESC);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS bridge_batch_deposit_event_v2;
-- +goose StatementEnd

View File
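
The migration above follows the repository's goose conventions (paired +goose Up / Down statement blocks). Assuming migrations are applied with the pressly/goose library, as the annotations suggest, a minimal sketch of running a migration directory against Postgres looks like this; the driver choice, DSN, and directory path are placeholders for this sketch.

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // postgres driver for database/sql; an assumption for this sketch
	"github.com/pressly/goose/v3"
)

func main() {
	db, err := sql.Open("postgres", "host=127.0.0.1 user=postgres dbname=bridge sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	if err := goose.SetDialect("postgres"); err != nil {
		log.Fatal(err)
	}
	// Applies every pending migration in the directory, including
	// bridge_batch_deposit_event_v2 above; "./migrations" is a placeholder path.
	if err := goose.Up(db, "./migrations"); err != nil {
		log.Fatal(err)
	}
	log.Println("migrations applied")
}
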

@@ -27,9 +27,9 @@ func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
r := router.Group("api/")
r.GET("/txs", api.HistoryCtrler.GetTxsByAddress)
r.GET("/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
r.GET("/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
r.GET("/txs", api.TxsByAddressCtl.GetTxsByAddress)
r.GET("/l2/withdrawals", api.L2WithdrawalsByAddressCtl.GetL2WithdrawalsByAddress)
r.GET("/l2/unclaimed/withdrawals", api.L2UnclaimedWithdrawalsByAddressCtl.GetL2UnclaimedWithdrawalsByAddress)
r.POST("/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
r.POST("/txsbyhashes", api.TxsByHashesCtl.PostQueryTxsByHashes)
}

View File
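
The route changes above split the single HistoryCtrler into one controller per endpoint. The following sketch shows the shape this implies for a gin service; the controller type and handler body are illustrative stand-ins, not the real internal/api implementations.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// txsByAddressController is an illustrative stand-in for the per-endpoint controllers.
type txsByAddressController struct{}

// GetTxsByAddress is a gin handler: one controller, one endpoint.
func (c *txsByAddressController) GetTxsByAddress(ctx *gin.Context) {
	ctx.JSON(http.StatusOK, gin.H{"address": ctx.Query("address"), "results": []string{}})
}

func main() {
	router := gin.Default()
	ctl := &txsByAddressController{}
	r := router.Group("api/")
	r.GET("/txs", ctl.GetTxsByAddress)
	_ = router.Run(":8080") // placeholder port
}
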

@@ -0,0 +1,93 @@
package types
// TxStatusType represents the status of a transaction.
type TxStatusType int
// Constants for TxStatusType.
const (
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
// from a later status (e.g., relayed) back to "sent".
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
// 2. The sent message is later processed (same cross-chain message); its tx_status should not overwrite TxStatusTypeRelayed.
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // Message hash is not tracked, so the message will not be processed again.
TxStatusTypeRelayed // Terminal status.
// TxStatusTypeFailedRelayed Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeFailedRelayed
// TxStatusTypeRelayTxReverted Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
// TxStatusBridgeBatchDeposit the user has deposited tokens to the bridge batch deposit contract
TxStatusBridgeBatchDeposit
// TxStatusBridgeBatchDistribute the bridge batch deposit contract distributed tokens to the user successfully
TxStatusBridgeBatchDistribute
// TxStatusBridgeBatchDistributeFailed the bridge batch deposit contract failed to distribute tokens to the user
TxStatusBridgeBatchDistributeFailed
)
// TokenType represents the type of token.
type TokenType int
// Constants for TokenType.
const (
TokenTypeUnknown TokenType = iota
TokenTypeETH
TokenTypeERC20
TokenTypeERC721
TokenTypeERC1155
)
// MessageType represents the type of message.
type MessageType int
// Constants for MessageType.
const (
MessageTypeUnknown MessageType = iota
MessageTypeL1SentMessage
MessageTypeL2SentMessage
MessageTypeL1BatchDeposit
)
// RollupStatusType represents the status of a rollup.
type RollupStatusType int
// Constants for RollupStatusType.
const (
RollupStatusTypeUnknown RollupStatusType = iota
RollupStatusTypeFinalized // only batch finalized status is used.
)
// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int
// Constants for MessageQueueEventType.
const (
MessageQueueEventTypeUnknown MessageQueueEventType = iota
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
)
// BatchStatusType represents the type of batch status.
type BatchStatusType int
// Constants for BatchStatusType.
const (
BatchStatusTypeUnknown BatchStatusType = iota
BatchStatusTypeCommitted
BatchStatusTypeReverted
BatchStatusTypeFinalized
)
// UpdateStatusType represents whether the batch info has been updated in the message table.
type UpdateStatusType int
// Constants for UpdateStatusType.
const (
UpdateStatusTypeUnupdated UpdateStatusType = iota
UpdateStatusTypeUpdated
)

View File
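
Because these enums are persisted as integers in tx_status / token_type columns and surfaced as numbers by the API, the iota ordering above is effectively part of the schema. A quick sketch of the values that result for the new constants, handy when reading rows by hand:

package main

import (
	"fmt"

	btypes "scroll-tech/bridge-history-api/internal/types"
)

func main() {
	// With the iota ordering above: Sent=0 ... Dropped=6, then the three new
	// bridge-batch statuses follow.
	fmt.Println(int(btypes.TxStatusBridgeBatchDeposit))          // 7
	fmt.Println(int(btypes.TxStatusBridgeBatchDistribute))       // 8
	fmt.Println(int(btypes.TxStatusBridgeBatchDistributeFailed)) // 9
	fmt.Println(int(btypes.TokenTypeERC20))                      // 2
	fmt.Println(int(btypes.MessageTypeL1BatchDeposit))           // 3
}
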

@@ -4,8 +4,6 @@ import (
"net/http"
"github.com/gin-gonic/gin"
"scroll-tech/bridge-history-api/internal/orm"
)
const (
@@ -79,17 +77,18 @@ type TxHistoryInfo struct {
ReplayTxHash string `json:"replay_tx_hash"`
RefundTxHash string `json:"refund_tx_hash"`
MessageHash string `json:"message_hash"`
TokenType orm.TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
TokenType TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
TokenIDs []string `json:"token_ids"` // only for erc721 and erc1155
TokenAmounts []string `json:"token_amounts"` // for eth and erc20, the length is 1, for erc721 and erc1155, the length could be > 1
MessageType orm.MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
MessageType MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message, 3: layer 1 batch deposit
L1TokenAddress string `json:"l1_token_address"`
L2TokenAddress string `json:"l2_token_address"`
BlockNumber uint64 `json:"block_number"`
TxStatus orm.TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
TxStatus TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped, 7: batch deposited, 8: batch distributed, 9: batch distribute failed
CounterpartChainTx *CounterpartChainTx `json:"counterpart_chain_tx"`
ClaimInfo *ClaimInfo `json:"claim_info"`
BlockTimestamp uint64 `json:"block_timestamp"`
BatchDepositFee string `json:"batch_deposit_fee"` // only for bridge batch deposit
}
// RenderJSON renders response with json

View File

@@ -104,10 +104,12 @@ linters-settings:
# minimal occurrences count to trigger, 3 by default
min-occurrences: 3
depguard:
list-type: blacklist
include-go-root: false
packages:
- github.com/davecgh/go-spew/spew
rules:
main:
files:
- $all
deny:
- pkg: "github.com/davecgh/go-spew/spew"
misspell:
# Correct spellings using locale preferences for US or UK.
# Default is to use a neutral variety of English.

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY ./bridge-history-api/go.* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
WORKDIR app
FROM chef as planner
@@ -17,7 +17,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,8 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
@@ -15,12 +18,37 @@ RUN go mod download -x
# Build event_watcher
FROM base as builder
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
# Pull event_watcher into a second stage deploy container
FROM alpine:latest
FROM ubuntu:20.04
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
COPY --from=builder /bin/event_watcher /bin/
WORKDIR /app
ENTRYPOINT ["event_watcher"]

View File

@@ -1,5 +1,8 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
@@ -15,12 +18,37 @@ RUN go mod download -x
# Build gas_oracle
FROM base as builder
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
# Pull gas_oracle into a second stage deploy container
FROM alpine:latest
FROM ubuntu:20.04
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
COPY --from=builder /bin/gas_oracle /bin/
WORKDIR /app
ENTRYPOINT ["gas_oracle"]

View File

@@ -1,8 +1,8 @@
ifeq ($(GO_VERSION),)
GO_VERSION=1.20
GO_VERSION=1.21
endif
ifeq ($(RUST_VERSION),)
RUST_VERSION=nightly-2022-12-10
RUST_VERSION=nightly-2023-12-03
endif
ifeq ($(PYTHON_VERSION),)
PYTHON_VERSION=3.10

View File

@@ -1,6 +1,6 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

View File

@@ -1,4 +1,4 @@
ARG GO_VERSION=1.20
ARG GO_VERSION=1.21
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -1,5 +1,5 @@
ARG ALPINE_VERSION=3.15
ARG RUST_VERSION=nightly-2022-12-10
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM alpine:${ALPINE_VERSION}

View File

@@ -1,4 +1,4 @@
ARG RUST_VERSION=nightly-2022-12-10
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -1,11 +0,0 @@
# Start from the latest golang base image
FROM golang:1.21
# Install Docker
RUN apt-get update && apt-get install -y docker.io docker-compose
# Set the working directory
WORKDIR /go/src/app
# This container will be executable
ENTRYPOINT [ "/bin/bash" ]

View File

@@ -1,5 +1,8 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
@@ -15,12 +18,37 @@ RUN go mod download -x
# Build rollup_relayer
FROM base as builder
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy container
FROM alpine:latest
FROM ubuntu:20.04
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
ENTRYPOINT ["rollup_relayer"]

View File

@@ -15,7 +15,7 @@ import (
const (
// GolangCIVersion to be used for linting.
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2"
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2"
)
// GOBIN environment variable.
@@ -51,7 +51,7 @@ func lint() {
}
cmd = exec.Command(filepath.Join(goBin(), "golangci-lint"))
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml")
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml", "--timeout", "10m")
if *v {
cmd.Args = append(cmd.Args, "-v")

View File

@@ -1,127 +0,0 @@
package dockercompose
import (
"context"
"crypto/rand"
"fmt"
"math/big"
"os"
"path/filepath"
"time"
"github.com/cloudflare/cfssl/log"
"github.com/scroll-tech/go-ethereum/ethclient"
tc "github.com/testcontainers/testcontainers-go/modules/compose"
"github.com/testcontainers/testcontainers-go/wait"
)
// PoSL1TestEnv represents the config needed to test in PoS Layer 1.
type PoSL1TestEnv struct {
dockerComposeFile string
compose tc.ComposeStack
gethHTTPPort int
hostPath string
}
// NewPoSL1TestEnv creates and initializes a new instance of PoSL1TestEnv with a random HTTP port.
func NewPoSL1TestEnv() (*PoSL1TestEnv, error) {
rootDir, err := findProjectRootDir()
if err != nil {
return nil, fmt.Errorf("failed to find project root directory: %v", err)
}
hostPath, found := os.LookupEnv("HOST_PATH")
if !found {
hostPath = ""
}
rnd, err := rand.Int(rand.Reader, big.NewInt(65536-1024))
if err != nil {
return nil, fmt.Errorf("failed to generate a random: %v", err)
}
gethHTTPPort := int(rnd.Int64()) + 1024
if err := os.Setenv("GETH_HTTP_PORT", fmt.Sprintf("%d", gethHTTPPort)); err != nil {
return nil, fmt.Errorf("failed to set GETH_HTTP_PORT: %v", err)
}
return &PoSL1TestEnv{
dockerComposeFile: filepath.Join(rootDir, "common", "docker-compose", "l1", "docker-compose.yml"),
gethHTTPPort: gethHTTPPort,
hostPath: hostPath,
}, nil
}
// Start starts the PoS L1 test environment by running the associated Docker Compose configuration.
func (e *PoSL1TestEnv) Start() error {
var err error
e.compose, err = tc.NewDockerCompose([]string{e.dockerComposeFile}...)
if err != nil {
return fmt.Errorf("failed to create docker compose: %w", err)
}
env := map[string]string{
"GETH_HTTP_PORT": fmt.Sprintf("%d", e.gethHTTPPort),
}
if e.hostPath != "" {
env["HOST_PATH"] = e.hostPath
}
if err = e.compose.WaitForService("geth", wait.NewHTTPStrategy("/").WithPort("8545/tcp").WithStartupTimeout(15*time.Second)).WithEnv(env).Up(context.Background()); err != nil {
if errStop := e.Stop(); errStop != nil {
log.Error("failed to stop PoS L1 test environment", "err", errStop)
}
return fmt.Errorf("failed to start PoS L1 test environment: %w", err)
}
return nil
}
// Stop stops the PoS L1 test environment by stopping and removing the associated Docker Compose services.
func (e *PoSL1TestEnv) Stop() error {
if e.compose != nil {
if err := e.compose.Down(context.Background(), tc.RemoveOrphans(true), tc.RemoveVolumes(true), tc.RemoveImagesLocal); err != nil {
return fmt.Errorf("failed to stop PoS L1 test environment: %w", err)
}
}
return nil
}
// Endpoint returns the HTTP endpoint for the PoS L1 test environment.
func (e *PoSL1TestEnv) Endpoint() string {
return fmt.Sprintf("http://127.0.0.1:%d", e.gethHTTPPort)
}
// L1Client returns an ethclient by dialing the running PoS L1 test environment
func (e *PoSL1TestEnv) L1Client() (*ethclient.Client, error) {
if e == nil {
return nil, fmt.Errorf("PoS L1 test environment is not initialized")
}
client, err := ethclient.Dial(e.Endpoint())
if err != nil {
return nil, fmt.Errorf("failed to dial PoS L1 test environment: %w", err)
}
return client, nil
}
func findProjectRootDir() (string, error) {
currentDir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get working directory: %w", err)
}
for {
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
if err == nil {
return currentDir, nil
}
parentDir := filepath.Dir(currentDir)
if parentDir == currentDir {
return "", fmt.Errorf("go.work file not found in any parent directory")
}
currentDir = parentDir
}
}

View File

@@ -5,45 +5,44 @@ go 1.21
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/bits-and-blooms/bitset v1.12.0
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004
github.com/docker/docker v25.0.3+incompatible
github.com/docker/docker v26.1.0+incompatible
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.28.0
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0
github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0
github.com/urfave/cli/v2 v2.25.7
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.5
gorm.io/driver/postgres v1.5.7
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2 v1.17.6 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.16 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.16 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/aws/aws-sdk-go-v2 v1.21.2 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.45 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect
github.com/aws/smithy-go v1.15.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/buger/goterm v1.0.4 // indirect
@@ -52,7 +51,8 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/compose-spec/compose-go/v2 v2.0.0-rc.2 // indirect
github.com/cloudflare/cfssl v1.6.5 // indirect
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/containerd/console v1.0.3 // indirect
@@ -62,13 +62,13 @@ require (
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315 // indirect
github.com/docker/cli v25.0.1+incompatible // indirect
github.com/docker/compose/v2 v2.24.3 // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
github.com/docker/compose/v2 v2.24.7 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
@@ -94,7 +94,6 @@ require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.15.5 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
@@ -143,7 +142,6 @@ require (
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
@@ -152,6 +150,7 @@ require (
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
@@ -175,15 +174,15 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.7.1 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect
@@ -193,8 +192,9 @@ require (
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
@@ -213,7 +213,7 @@ require (
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect
@@ -229,23 +229,23 @@ require (
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/mock v0.4.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/term v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.15.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
@@ -255,7 +255,7 @@ require (
k8s.io/apimachinery v0.26.7 // indirect
k8s.io/apiserver v0.26.7 // indirect
k8s.io/client-go v0.26.7 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
rsc.io/tmplfunc v0.0.3 // indirect

View File

@@ -1,7 +1,7 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
@@ -12,11 +12,9 @@ github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59M
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
@@ -38,32 +36,33 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc=
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0=
github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/config v1.18.16 h1:4r7gsCu8Ekwl5iJGE/GmspA2UifqySCCkyyyPFeWs3w=
github.com/aws/aws-sdk-go-v2/config v1.18.16/go.mod h1:XjM6lVbq7UgELp9NjXBrb1DQY/ownlWsvDhEQksemJc=
github.com/aws/aws-sdk-go-v2/credentials v1.13.16 h1:GgToSxaENX/1zXIGNFfiVk4hxryYJ5Vt4Mh8XLAL7Lc=
github.com/aws/aws-sdk-go-v2/credentials v1.13.16/go.mod h1:KP7aFJhfwPFgx9aoVYL2nYHjya5WBD98CWaadpgmnpY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 h1:5qyqXASrX2zy5cTnoHHa4N2c3Lc94GH7gjnBP3GwKdU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24/go.mod h1:neYVaeKr5eT7BzwULuG2YbLhzWZ22lpjKdCybR7AXrQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 h1:y+8n9AGDjikyXoMBTRaHHHSaFEB8267ykmvyPodJfys=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 h1:r+Kv+SEJquhAZXaJ7G4u44cIwXV3f8K+N482NNAzJZA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 h1:hf+Vhp5WtTdcSdE+yEcUz8L73sAzN0R+0jQv+Z51/mI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31/go.mod h1:5zUjguZfG5qjhG9/wqmuyHRyUftl2B5Cp6NNxNC6kRA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 h1:c5qGfdbCHav6viBwiyDns3OXqhqAbGjfIB4uVu2ayhk=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24/go.mod h1:HMA4FZG6fyib+NDo5bpIxX1EhYjrAOveZJY2YR0xrNE=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 h1:bdKIX6SVF3nc3xJFw6Nf0igzS6Ff/louGq8Z6VP/3Hs=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5/go.mod h1:vuWiaDB30M/QTC+lI3Wj6S/zb7tpUK2MSYgy3Guh2L0=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 h1:xLPZMyuZ4GuqRCIec/zWuIhRFPXh2UOJdLXBSi64ZWQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5/go.mod h1:QjxpHmCwAg0ESGtPQnLIVp7SedTOBMYy+Slr3IfMKeI=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 h1:rIFn5J3yDoeuKCE9sESXqM5POTAhOP1du3bv/qTL+tE=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6/go.mod h1:48WJ9l3dwP0GSHWGc5sFGGlCkuA82Mc2xnw+T6Q8aDw=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA=
github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes=
github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE=
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8=
github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck=
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k=
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg=
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU=
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ=
github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8=
github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -110,15 +109,16 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpV
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cloudflare/cfssl v1.6.5 h1:46zpNkm6dlNkMZH/wMW22ejih6gIaJbzL2du6vD7ZeI=
github.com/cloudflare/cfssl v1.6.5/go.mod h1:Bk1si7sq8h2+yVEDrFJiz3d7Aw+pfjjJSZVaD+Taky4=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.2 h1:eJ01FpliL/02KvsaPyH1bSLbM1S70yWQUojHVRbyvy4=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.2/go.mod h1:IVsvFyGVhw4FASzUtlWNVaAOhYmakXAFY9IlZ7LAuD8=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 h1:NlkpaLBPFr05mNJWVMH7PP4L30gFG6k4z1QpypLUSh8=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
@@ -144,12 +144,17 @@ github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtO
github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -161,20 +166,21 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315 h1:UZxx9xBADdf/9UmSdEUi+pdJoPKpgcf9QUAY5gEIYmY=
github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315/go.mod h1:X8ZHhuW6ncwtoJ36TlU+gyaROTcBkTE01VHYmTStQCE=
github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU=
github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/compose/v2 v2.24.3 h1:BVc1oDV7aQgksH64pDKTvcI95G36uJ+Mz9DGGBBoZeQ=
github.com/docker/compose/v2 v2.24.3/go.mod h1:D8Nv9+juzD7xiMyyHJ7G2J/MOYiGBmb9SvdIW5+2zKo=
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible h1:xhVCHXq+P5LhT31+RuDuk0xXEbEnd50Fr37J1bGuyWg=
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/compose/v2 v2.24.7 h1:1WSo4CVf18tnGJMC6V78jYsAxSDD61ry6L3JwVT+8EI=
github.com/docker/compose/v2 v2.24.7/go.mod h1:7U3QbXdRJfBylTgkdlrjOg8hWLZqM09mof9DVZ5Fh4E=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v26.1.0+incompatible h1:W1G9MPNbskA6VZWL7b3ZljTh0pXI68FpINx0GKaOdaM=
github.com/docker/docker v26.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -224,6 +230,7 @@ github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
@@ -283,12 +290,14 @@ github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -311,8 +320,10 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/certificate-transparency-go v1.1.7 h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw=
github.com/google/certificate-transparency-go v1.1.7/go.mod h1:FSSBo8fyMVgqptbfF6j5p/XNdgQftAhSmXcIxV9iphE=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -336,11 +347,15 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
@@ -358,6 +373,8 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
@@ -380,10 +397,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
@@ -399,6 +414,9 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -412,6 +430,7 @@ github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVE
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
@@ -445,6 +464,7 @@ github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -466,8 +486,6 @@ github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebG
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -476,6 +494,7 @@ github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -488,6 +507,8 @@ github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqky
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991 h1:r80LLQ91uOLxU1ElAvrB1o8oBsph51lPzVnr7t2b200=
github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991/go.mod h1:6MddWPSL5jxy+W8eMMHWDOfZzzRRKWXPZqajw72YHBc=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
@@ -554,6 +575,7 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
@@ -570,24 +592,27 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
@@ -599,6 +624,7 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -607,10 +633,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
@@ -631,21 +657,28 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE=
github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU=
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 h1:JmfC365KywYwHB946TTiQWEb8kqPY+pybPLoGE9GgVk=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 h1:XTHrT015sxHyJ5FnQ0AeemSspZWaDq7DoTRW0EVsDCE=
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c h1:2EejZtjFjKJGk71ANb+wtFK5EjUzUkEM3R0xnp559xg=
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
@@ -667,16 +700,16 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8=
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0 h1:QOCeTYZIYixg796Ik60MOaeMgpAKPbQd5pJOdTrftyg=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 h1:ff0s4JdYIdNAVSi/SrpN2Pdt1f+IjIw3AKjbHau8Un4=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
github.com/testcontainers/testcontainers-go v0.30.0/go.mod h1:K+kHNGiM5zjklKjgTtcrEetF3uhWbMUyqAQoyoh8Pf0=
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0 h1:k5ZbLDlO9AGJ5N2GRqVXL3L2gs+ZHXBfTpT2+jFNtgA=
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0/go.mod h1:+9x1MOKxi1SF+s7iuNxwW0fRQMm4trp6QvZm1fiJdaA=
github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0 h1:D3HFqpZS90iRGAN7M85DFiuhPfvYvFNnx8urQ6mPAvo=
github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0/go.mod h1:e1sKxwUOkqzvaqdHl/oV9mUtFmkDPTfBGp0po2tnWQU=
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
@@ -685,6 +718,7 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw=
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
@@ -695,6 +729,7 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
@@ -703,6 +738,8 @@ github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
github.com/weppos/publicsuffix-go v0.30.0 h1:QHPZ2GRu/YE7cvejH9iyavPOkVCB4dNxp2ZvtT+vQLY=
github.com/weppos/publicsuffix-go v0.30.0/go.mod h1:kBi8zwYnR0zrbm8RcuN1o9Fzgpnnn+btVN8uWPMyXAY=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -710,6 +747,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -717,14 +756,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 h1:DZH5n7L3L8RxKdSyJHZt7WePgwdhHnPhQFdQSJaHF+o=
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w=
github.com/zmap/zlint/v3 v3.5.0 h1:Eh2B5t6VKgVH0DFmTwOqE50POvyDhUaU9T2mJOe1vfQ=
github.com/zmap/zlint/v3 v3.5.0/go.mod h1:JkNSrsDJ8F4VRtBZcYUQSvnWFL7utcjDIn+FE64mlBI=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 h1:RsQi0qJ2imFfCvZabqzM9cNXBG8k6gXMv1A0cXRmH6A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0/go.mod h1:vsh3ySueQCiKPxFLvjWC4Z135gIa34TQ/NSqkDTZYUM=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 h1:2ea0IkZBsWH+HA2GkD+7+hRw2u97jzdFyRtXuO14a1s=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0/go.mod h1:4m3RnBBb+7dB9d21y510oO1pdB1V4J6smNf14WXcBFQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU=
@@ -771,12 +815,11 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -789,9 +832,11 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -801,12 +846,11 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -815,7 +859,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -863,20 +906,20 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -887,8 +930,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -902,14 +945,15 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg=
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -929,8 +973,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
@@ -948,12 +992,14 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM=
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -966,11 +1012,10 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/driver/postgres v1.5.7 h1:8ptbNJTDbEmhdr62uReG5BGkdQyeasu/FZHxI0IMGnM=
gorm.io/driver/postgres v1.5.7/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA=
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg=
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gotest.tools v1.4.0 h1:BjtEgfuw8Qyd+jPvQz8CfoxiO/UjFEidWinwEXZiWv0=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
@@ -984,8 +1029,8 @@ k8s.io/apiserver v0.26.7 h1:NX/zBZZn4R+Cq6shwyn8Pn8REd0yJJ16dbtv9WkEVEU=
k8s.io/apiserver v0.26.7/go.mod h1:r0wDRWHI7VL/KlQLTkJJBVGZ3KeNfv+VetlyRtr86xs=
k8s.io/client-go v0.26.7 h1:hyU9aKHlwVOykgyxzGYkrDSLCc4+mimZVyUJjPyUn1E=
k8s.io/client-go v0.26.7/go.mod h1:okYjy0jtq6sdeztALDvCh24tg4opOQS1XNvsJlERDAo=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk=


@@ -8,14 +8,14 @@ services:
mkdir -p /data/execution &&
cp -a /execution/* /data/execution/"
volumes:
- ${HOST_PATH:-../../..}/common/docker-compose/l1/consensus:/consensus
- ${HOST_PATH:-../../..}/common/docker-compose/l1/execution:/execution
- ../../common/testcontainers/consensus:/consensus
- ../../common/testcontainers/execution:/execution
- data:/data
# Creates a genesis state for the beacon chain using a YAML configuration file and
# a deterministic set of 64 validators.
create-beacon-chain-genesis:
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest"
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:HEAD-263557"
command:
- testnet
- generate-genesis
@@ -96,7 +96,7 @@ services:
- --nodiscover
- --syncmode=full
ports:
- ${GETH_HTTP_PORT:-8545}:8545
- 8545
depends_on:
geth-genesis:
condition: service_completed_successfully
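Because the geth service now publishes 8545 without a fixed host binding, Docker assigns an ephemeral host port. A minimal sketch of resolving that mapped port from the compose stack (the helper name gethHTTPEndpoint is illustrative; it uses only the testcontainers-go compose API imported by the Go changes below):

// gethHTTPEndpoint resolves the ephemeral host port mapped to the geth
// service's container port 8545 and builds an HTTP endpoint from it.
// Assumes imports: context, fmt, and
// github.com/testcontainers/testcontainers-go/modules/compose.
func gethHTTPEndpoint(ctx context.Context, stack compose.ComposeStack) (string, error) {
	gethContainer, err := stack.ServiceContainer(ctx, "geth")
	if err != nil {
		return "", err
	}
	host, err := gethContainer.Host(ctx)
	if err != nil {
		return "", err
	}
	mappedPort, err := gethContainer.MappedPort(ctx, "8545")
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("http://%s:%s", host, mappedPort.Port()), nil
}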


@@ -4,10 +4,13 @@ import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/compose"
"github.com/testcontainers/testcontainers-go/modules/postgres"
"github.com/testcontainers/testcontainers-go/wait"
"gorm.io/gorm"
@@ -18,8 +21,8 @@ import (
// TestcontainerApps groups the testcontainers used by the integration tests
type TestcontainerApps struct {
postgresContainer *postgres.PostgresContainer
l1GethContainer *testcontainers.DockerContainer
l2GethContainer *testcontainers.DockerContainer
poSL1Container compose.ComposeStack
// common time stamp in nanoseconds.
Timestamp int
@@ -28,6 +31,11 @@ type TestcontainerApps struct {
// NewTestcontainerApps returns a new instance of the TestcontainerApps struct
func NewTestcontainerApps() *TestcontainerApps {
timestamp := time.Now().Nanosecond()
// Disable Ryuk to work around "creating reaper failed: failed to create container";
// see https://github.com/testcontainers/testcontainers-go/issues/2172
if err := os.Setenv("TESTCONTAINERS_RYUK_DISABLED", "true"); err != nil {
panic("set env failed: " + err.Error())
}
return &TestcontainerApps{
Timestamp: timestamp,
}
@@ -53,30 +61,6 @@ func (t *TestcontainerApps) StartPostgresContainer() error {
return nil
}
// StartL1GethContainer starts a L1Geth container
func (t *TestcontainerApps) StartL1GethContainer() error {
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
return nil
}
req := testcontainers.ContainerRequest{
Image: "scroll_l1geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
Cmd: []string{"--log.debug", "ANY"},
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
}
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
if err != nil {
log.Printf("failed to start scroll_l1geth container: %s", err)
return err
}
t.l1GethContainer, _ = container.(*testcontainers.DockerContainer)
return nil
}
// StartL2GethContainer starts a L2Geth container
func (t *TestcontainerApps) StartL2GethContainer() error {
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
@@ -85,7 +69,10 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
req := testcontainers.ContainerRequest{
Image: "scroll_l2geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
@@ -100,6 +87,51 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
return nil
}
// StartPoSL1Container starts the PoS L1 container by running the associated Docker Compose configuration
func (t *TestcontainerApps) StartPoSL1Container() error {
var (
err error
rootDir string
dockerComposeFile string
)
if rootDir, err = findProjectRootDir(); err != nil {
return fmt.Errorf("failed to find project root directory: %v", err)
}
dockerComposeFile = filepath.Join(rootDir, "common", "testcontainers", "docker-compose.yml")
if t.poSL1Container, err = compose.NewDockerCompose([]string{dockerComposeFile}...); err != nil {
return err
}
if err = t.poSL1Container.WaitForService("geth", wait.ForListeningPort("8545").WithStartupTimeout(15*time.Second)).Up(context.Background()); err != nil {
t.poSL1Container = nil
return fmt.Errorf("failed to start PoS L1 container: %w", err)
}
return nil
}
// GetPoSL1EndPoint returns the HTTP endpoint of the geth service in the running PoS L1 compose stack
func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
if t.poSL1Container == nil {
return "", fmt.Errorf("PoS L1 container is not running")
}
container, err := t.poSL1Container.ServiceContainer(context.Background(), "geth")
if err != nil {
return "", err
}
return container.PortEndpoint(context.Background(), "8545/tcp", "http")
}
// GetPoSL1Client returns an ethclient connected to the running PoS L1 geth service
func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
endpoint, err := t.GetPoSL1EndPoint()
if err != nil {
return nil, err
}
return ethclient.Dial(endpoint)
}
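// A minimal usage sketch (hypothetical test code, mirroring the assertions in
// TestNewTestcontainerApps later in this change set):
//
//	testApps := NewTestcontainerApps()
//	defer testApps.Free()
//	if err := testApps.StartPoSL1Container(); err != nil {
//		t.Fatal(err) // t is the *testing.T of the calling test
//	}
//	client, err := testApps.GetPoSL1Client() // dials the mapped 8545/tcp port over HTTP
//	if err != nil {
//		t.Fatal(err)
//	}
//	_ = client // use the client against the PoS L1 devnet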
// GetDBEndPoint returns the endpoint of the running postgres container
func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
@@ -108,18 +140,6 @@ func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
}
// GetL1GethEndPoint returns the endpoint of the running L1Geth container
func (t *TestcontainerApps) GetL1GethEndPoint() (string, error) {
if t.l1GethContainer == nil || !t.l1GethContainer.IsRunning() {
return "", fmt.Errorf("l1 geth is not running")
}
endpoint, err := t.l1GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
if err != nil {
return "", err
}
return endpoint, nil
}
// GetL2GethEndPoint returns the endpoint of the running L2Geth container
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
@@ -134,10 +154,11 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
endpoint, err := t.GetDBEndPoint()
if err != nil {
return nil, err
}
// endpoint, err := t.GetDBEndPoint()
// if err != nil {
// return nil, err
// }
endpoint := "postgres://lmr:@localhost:5432/unittest?sslmode=disable"
dbCfg := &database.Config{
DSN: endpoint,
DriverName: "postgres",
@@ -147,19 +168,6 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
return database.InitDB(dbCfg)
}
// GetL1GethClient returns an ethclient by dialing the running L1Geth
func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
endpoint, err := t.GetL1GethEndPoint()
if err != nil {
return nil, err
}
client, err := ethclient.Dial(endpoint)
if err != nil {
return nil, err
}
return client, nil
}
// GetL2GethClient returns an ethclient by dialing the running L2Geth
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
endpoint, err := t.GetL2GethEndPoint()
@@ -181,14 +189,38 @@ func (t *TestcontainerApps) Free() {
log.Printf("failed to stop postgres container: %s", err)
}
}
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
if err := t.l1GethContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop scroll_l1geth container: %s", err)
}
}
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
if err := t.l2GethContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop scroll_l2geth container: %s", err)
}
}
if t.poSL1Container != nil {
if err := t.poSL1Container.Down(context.Background(), compose.RemoveOrphans(true), compose.RemoveVolumes(true), compose.RemoveImagesLocal); err != nil {
log.Printf("failed to stop PoS L1 container: %s", err)
} else {
t.poSL1Container = nil
}
}
}
// findProjectRootDir finds the project root directory, i.e. the nearest parent directory that contains a go.work file
func findProjectRootDir() (string, error) {
currentDir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get working directory: %w", err)
}
for {
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
if err == nil {
return currentDir, nil
}
parentDir := filepath.Dir(currentDir)
if parentDir == currentDir {
return "", fmt.Errorf("go.work file not found in any parent directory")
}
currentDir = parentDir
}
}
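// Example (for illustration, assuming the tests run from a package directory such as
// common/testcontainers): the loop climbs parent directories until it reaches the
// repository root containing go.work, which StartPoSL1Container then joins with
// "common", "testcontainers", "docker-compose.yml" to locate the compose file.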

View File

@@ -17,8 +17,9 @@ func TestNewTestcontainerApps(t *testing.T) {
ethclient *ethclient.Client
)
// test start testcontainers
testApps := NewTestcontainerApps()
// test start testcontainers
assert.NoError(t, testApps.StartPostgresContainer())
endpoint, err = testApps.GetDBEndPoint()
assert.NoError(t, err)
@@ -27,14 +28,6 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, gormDBclient)
assert.NoError(t, testApps.StartL1GethContainer())
endpoint, err = testApps.GetL1GethEndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetL1GethClient()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartL2GethContainer())
endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
@@ -43,17 +36,25 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartPoSL1Container())
endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetPoSL1Client()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
// test free testcontainers
testApps.Free()
endpoint, err = testApps.GetDBEndPoint()
assert.EqualError(t, err, "postgres is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetL1GethEndPoint()
assert.EqualError(t, err, "l1 geth is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetL2GethEndPoint()
assert.EqualError(t, err, "l2 geth is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetPoSL1EndPoint()
assert.EqualError(t, err, "PoS L1 container is not running")
assert.Empty(t, endpoint)
}

View File

@@ -1,64 +0,0 @@
package encoding
import (
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/core/types"
)
// ConstructSkippedBitmap constructs the skipped L1 message bitmap of the batch.
func ConstructSkippedBitmap(batchIndex uint64, chunks []*Chunk, totalL1MessagePoppedBefore uint64) ([]byte, uint64, error) {
// skipped L1 message bitmap, an array of 256-bit bitmaps
var skippedBitmap []*big.Int
// the first queue index that belongs to this batch
baseIndex := totalL1MessagePoppedBefore
// the next queue index that we need to process
nextIndex := totalL1MessagePoppedBefore
for chunkID, chunk := range chunks {
for blockID, block := range chunk.Blocks {
for _, tx := range block.Transactions {
if tx.Type != types.L1MessageTxType {
continue
}
currentIndex := tx.Nonce
if currentIndex < nextIndex {
return nil, 0, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batchIndex, chunkID, blockID, block.Header.Hash(), tx.TxHash)
}
// mark skipped messages
for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
quo := int((skippedIndex - baseIndex) / 256)
rem := int((skippedIndex - baseIndex) % 256)
for len(skippedBitmap) <= quo {
bitmap := big.NewInt(0)
skippedBitmap = append(skippedBitmap, bitmap)
}
skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
}
// process included message
quo := int((currentIndex - baseIndex) / 256)
for len(skippedBitmap) <= quo {
bitmap := big.NewInt(0)
skippedBitmap = append(skippedBitmap, bitmap)
}
nextIndex = currentIndex + 1
}
}
}
bitmapBytes := make([]byte, len(skippedBitmap)*32)
for ii, num := range skippedBitmap {
bytes := num.Bytes()
padding := 32 - len(bytes)
copy(bitmapBytes[32*ii+padding:], bytes)
}
return bitmapBytes, nextIndex, nil
}
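// Worked example (illustrative values): with totalL1MessagePoppedBefore = 0 and a single
// included L1 message whose queue index (tx.Nonce) is 3, indices 0, 1 and 2 are marked as
// skipped. All three fall into word 0 (quo = 0, rem = 0, 1, 2), so skippedBitmap[0] = 0b111 = 7,
// bitmapBytes is one 32-byte big-endian word ending in 0x07, and the returned nextIndex is 4.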

View File

@@ -1,463 +0,0 @@
package codecv0
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"math/big"
"strings"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"scroll-tech/common/types/encoding"
)
// CodecV0Version denotes the version of the codec.
const CodecV0Version = 0
// DABlock represents a Data Availability Block.
type DABlock struct {
BlockNumber uint64
Timestamp uint64
BaseFee *big.Int
GasLimit uint64
NumTransactions uint16
NumL1Messages uint16
}
// DAChunk groups consecutive DABlocks with their transactions.
type DAChunk struct {
Blocks []*DABlock
Transactions [][]*types.TransactionData
}
// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
Version uint8
BatchIndex uint64
L1MessagePopped uint64
TotalL1MessagePopped uint64
DataHash common.Hash
ParentBatchHash common.Hash
SkippedL1MessageBitmap []byte
}
// NewDABlock creates a new DABlock from the given encoding.Block and the total number of L1 messages popped before.
func NewDABlock(block *encoding.Block, totalL1MessagePoppedBefore uint64) (*DABlock, error) {
if !block.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
// note: numL1Messages includes skipped messages
numL1Messages := block.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
// note: numTransactions includes skipped messages
numL2Transactions := block.NumL2Transactions()
numTransactions := numL1Messages + numL2Transactions
if numTransactions > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
daBlock := DABlock{
BlockNumber: block.Header.Number.Uint64(),
Timestamp: block.Header.Time,
BaseFee: block.Header.BaseFee,
GasLimit: block.Header.GasLimit,
NumTransactions: uint16(numTransactions),
NumL1Messages: uint16(numL1Messages),
}
return &daBlock, nil
}
// Encode serializes the DABlock into a slice of bytes.
func (b *DABlock) Encode() []byte {
bytes := make([]byte, 60)
binary.BigEndian.PutUint64(bytes[0:], b.BlockNumber)
binary.BigEndian.PutUint64(bytes[8:], b.Timestamp)
if b.BaseFee != nil {
binary.BigEndian.PutUint64(bytes[40:], b.BaseFee.Uint64())
}
binary.BigEndian.PutUint64(bytes[48:], b.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], b.NumTransactions)
binary.BigEndian.PutUint16(bytes[58:], b.NumL1Messages)
return bytes
}
// NewDAChunk creates a new DAChunk from the given encoding.Chunk and the total number of L1 messages popped before.
func NewDAChunk(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (*DAChunk, error) {
var blocks []*DABlock
var txs [][]*types.TransactionData
if chunk == nil {
return nil, errors.New("chunk is nil")
}
if len(chunk.Blocks) == 0 {
return nil, errors.New("number of blocks is 0")
}
if len(chunk.Blocks) > 255 {
return nil, errors.New("number of blocks exceeds 1 byte")
}
for _, block := range chunk.Blocks {
b, err := NewDABlock(block, totalL1MessagePoppedBefore)
if err != nil {
return nil, err
}
blocks = append(blocks, b)
totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
txs = append(txs, block.Transactions)
}
daChunk := DAChunk{
Blocks: blocks,
Transactions: txs,
}
return &daChunk, nil
}
// Encode serializes the DAChunk into a slice of bytes.
func (c *DAChunk) Encode() ([]byte, error) {
var chunkBytes []byte
chunkBytes = append(chunkBytes, byte(len(c.Blocks)))
var l2TxDataBytes []byte
for _, block := range c.Blocks {
chunkBytes = append(chunkBytes, block.Encode()...)
}
for _, blockTxs := range c.Transactions {
for _, txData := range blockTxs {
if txData.Type == types.L1MessageTxType {
continue
}
var txLen [4]byte
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(txData)
if err != nil {
return nil, err
}
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
l2TxDataBytes = append(l2TxDataBytes, txLen[:]...)
l2TxDataBytes = append(l2TxDataBytes, rlpTxData...)
}
}
chunkBytes = append(chunkBytes, l2TxDataBytes...)
return chunkBytes, nil
}
// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
chunkBytes, err := c.Encode()
if err != nil {
return common.Hash{}, err
}
if len(chunkBytes) == 0 {
return common.Hash{}, errors.New("chunk data is empty and cannot be processed")
}
numBlocks := chunkBytes[0]
// concatenate block contexts
var dataBytes []byte
for i := 0; i < int(numBlocks); i++ {
// only the first 58 bytes of each BlockContext are needed for the hashing process
dataBytes = append(dataBytes, chunkBytes[1+60*i:60*i+59]...)
}
// concatenate l1 and l2 tx hashes
for _, blockTxs := range c.Transactions {
var l1TxHashes []byte
var l2TxHashes []byte
for _, txData := range blockTxs {
txHash := strings.TrimPrefix(txData.TxHash, "0x")
hashBytes, err := hex.DecodeString(txHash)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to decode tx hash from TransactionData: hash=%v, err=%w", txData.TxHash, err)
}
if txData.Type == types.L1MessageTxType {
l1TxHashes = append(l1TxHashes, hashBytes...)
} else {
l2TxHashes = append(l2TxHashes, hashBytes...)
}
}
dataBytes = append(dataBytes, l1TxHashes...)
dataBytes = append(dataBytes, l2TxHashes...)
}
hash := crypto.Keccak256Hash(dataBytes)
return hash, nil
}
// NewDABatch creates a DABatch from the provided encoding.Batch.
func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
// compute batch data hash
var dataBytes []byte
totalL1MessagePoppedBeforeChunk := batch.TotalL1MessagePoppedBefore
for _, chunk := range batch.Chunks {
// build data hash
daChunk, err := NewDAChunk(chunk, totalL1MessagePoppedBeforeChunk)
if err != nil {
return nil, err
}
totalL1MessagePoppedBeforeChunk += chunk.NumL1Messages(totalL1MessagePoppedBeforeChunk)
daChunkHash, err := daChunk.Hash()
if err != nil {
return nil, err
}
dataBytes = append(dataBytes, daChunkHash.Bytes()...)
}
// compute data hash
dataHash := crypto.Keccak256Hash(dataBytes)
// skipped L1 messages bitmap
bitmapBytes, totalL1MessagePoppedAfter, err := encoding.ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, err
}
daBatch := DABatch{
Version: CodecV0Version,
BatchIndex: batch.Index,
L1MessagePopped: totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore,
TotalL1MessagePopped: totalL1MessagePoppedAfter,
DataHash: dataHash,
ParentBatchHash: batch.ParentBatchHash,
SkippedL1MessageBitmap: bitmapBytes,
}
return &daBatch, nil
}
// NewDABatchFromBytes attempts to decode the given byte slice into a DABatch.
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
if len(data) < 89 {
return nil, fmt.Errorf("insufficient data for DABatch, expected at least 89 bytes but got %d", len(data))
}
b := &DABatch{
Version: data[0],
BatchIndex: binary.BigEndian.Uint64(data[1:9]),
L1MessagePopped: binary.BigEndian.Uint64(data[9:17]),
TotalL1MessagePopped: binary.BigEndian.Uint64(data[17:25]),
DataHash: common.BytesToHash(data[25:57]),
ParentBatchHash: common.BytesToHash(data[57:89]),
SkippedL1MessageBitmap: data[89:],
}
return b, nil
}
// Encode serializes the DABatch into bytes.
func (b *DABatch) Encode() []byte {
batchBytes := make([]byte, 89+len(b.SkippedL1MessageBitmap))
batchBytes[0] = b.Version
binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex)
binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped)
binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped)
copy(batchBytes[25:], b.DataHash[:])
copy(batchBytes[57:], b.ParentBatchHash[:])
copy(batchBytes[89:], b.SkippedL1MessageBitmap[:])
return batchBytes
}
// Hash computes the hash of the serialized DABatch.
func (b *DABatch) Hash() common.Hash {
bytes := b.Encode()
return crypto.Keccak256Hash(bytes)
}
// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
// TODO: implement this function.
return nil, nil, nil
}
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates the gas cost for computing the keccak256 hash of a given size.
func GetKeccak256Gas(size uint64) uint64 {
return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
}
// GetMemoryExpansionCost calculates the cost of memory expansion for a given memoryByteSize.
func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
memorySizeWord := (memoryByteSize + 31) / 32
memoryCost := (memorySizeWord*memorySizeWord)/512 + (3 * memorySizeWord)
return memoryCost
}
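// Worked example (illustrative numbers): GetMemoryExpansionCost(36) uses
// memorySizeWord = (36+31)/32 = 2, giving 2*2/512 + 3*2 = 6 gas (integer division
// truncates the quadratic term). Likewise GetKeccak256Gas(96) = GetMemoryExpansionCost(96)
// + 30 + 6*((96+31)/32) = 9 + 30 + 18 = 57 gas.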
// EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func EstimateBlockL1CommitCalldataSize(b *encoding.Block) (uint64, error) {
var size uint64
for _, txData := range b.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
size += 4 // 4 bytes payload length
txPayloadLength, err := getTxPayloadLength(txData)
if err != nil {
return 0, err
}
size += txPayloadLength
}
size += 60 // 60 bytes BlockContext
return size, nil
}
// EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately.
func EstimateBlockL1CommitGas(b *encoding.Block) (uint64, error) {
var total uint64
var numL1Messages uint64
for _, txData := range b.Transactions {
if txData.Type == types.L1MessageTxType {
numL1Messages++
continue
}
txPayloadLength, err := getTxPayloadLength(txData)
if err != nil {
return 0, err
}
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 4 // 4 bytes payload length
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
}
// 60 bytes BlockContext calldata
total += CalldataNonZeroByteGas * 60
// sload
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
// staticcall
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
total += GetMemoryExpansionCost(36) * numL1Messages // staticcall to proxy
total += 100 * numL1Messages // read admin in proxy
total += 100 * numL1Messages // read impl in proxy
total += 100 * numL1Messages // access impl
total += GetMemoryExpansionCost(36) * numL1Messages // delegatecall to impl
return total, nil
}
// EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately.
func EstimateChunkL1CommitCalldataSize(c *encoding.Chunk) (uint64, error) {
var totalL1CommitCalldataSize uint64
for _, block := range c.Blocks {
blockL1CommitCalldataSize, err := EstimateBlockL1CommitCalldataSize(block)
if err != nil {
return 0, err
}
totalL1CommitCalldataSize += blockL1CommitCalldataSize
}
return totalL1CommitCalldataSize, nil
}
// EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately.
func EstimateChunkL1CommitGas(c *encoding.Chunk) (uint64, error) {
var totalTxNum uint64
var totalL1CommitGas uint64
for _, block := range c.Blocks {
totalTxNum += uint64(len(block.Transactions))
blockL1CommitGas, err := EstimateBlockL1CommitGas(block)
if err != nil {
return 0, err
}
totalL1CommitGas += blockL1CommitGas
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas, nil
}
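// Worked example (using block1 from the codecv0 test below, which carries 2 L2 transactions
// and an estimated block commit gas of 4900): for a chunk containing only that block,
// totalL1CommitGas = 4900 + 100 (warm sload) + 16 (numBlocks byte) + 16*1*60 (one BlockContext)
// + GetKeccak256Gas(58*1 + 32*2) (= 66) = 6042, which matches the test's expected value.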
// EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately.
func EstimateBatchL1CommitGas(b *encoding.Batch) (uint64, error) {
var totalL1CommitGas uint64
// Add extra gas costs
totalL1CommitGas += 100000 // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 21000 // base fee for tx
totalL1CommitGas += CalldataNonZeroByteGas // version in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += GetKeccak256Gas(89 + 32) // parent batch header hash, length is estimated as 89 (constant part)+ 32 (1 skippedL1MessageBitmap)
totalL1CommitGas += CalldataNonZeroByteGas * (89 + 32) // parent batch header in calldata
// adjust batch data hash gas cost
totalL1CommitGas += GetKeccak256Gas(uint64(32 * len(b.Chunks)))
totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore
for _, chunk := range b.Chunks {
chunkL1CommitGas, err := EstimateChunkL1CommitGas(chunk)
if err != nil {
return 0, err
}
totalL1CommitGas += chunkL1CommitGas
totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore)
totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk
totalL1CommitGas += CalldataNonZeroByteGas * (32 * (totalL1MessagePoppedInChunk + 255) / 256)
totalL1CommitGas += GetKeccak256Gas(89 + 32*(totalL1MessagePoppedInChunk+255)/256)
totalL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
return 0, err
}
totalL1CommitGas += GetMemoryExpansionCost(totalL1CommitCalldataSize)
}
return totalL1CommitGas, nil
}
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) (uint64, error) {
var totalL1CommitCalldataSize uint64
for _, chunk := range b.Chunks {
chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
return 0, err
}
totalL1CommitCalldataSize += chunkL1CommitCalldataSize
}
return totalL1CommitCalldataSize, nil
}
func getTxPayloadLength(txData *types.TransactionData) (uint64, error) {
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(txData)
if err != nil {
return 0, err
}
return uint64(len(rlpTxData)), nil
}

View File

@@ -1,597 +0,0 @@
package codecv0
import (
"encoding/hex"
"encoding/json"
"math/big"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types/encoding"
)
func TestCodecV0(t *testing.T) {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)
parentDABatch, err := NewDABatch(&encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: nil,
})
assert.NoError(t, err)
parentBatchHash := parentDABatch.Hash()
block1 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
block2 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
block3 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
block4 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
block5 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
block6 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
blockL1CommitCalldataSize, err := EstimateBlockL1CommitCalldataSize(block1)
assert.NoError(t, err)
assert.Equal(t, uint64(298), blockL1CommitCalldataSize)
blockL1CommitGas, err := EstimateBlockL1CommitGas(block1)
assert.NoError(t, err)
assert.Equal(t, uint64(4900), blockL1CommitGas)
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block2)
assert.NoError(t, err)
assert.Equal(t, uint64(5745), blockL1CommitCalldataSize)
blockL1CommitGas, err = EstimateBlockL1CommitGas(block2)
assert.NoError(t, err)
assert.Equal(t, uint64(93613), blockL1CommitGas)
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block3)
assert.NoError(t, err)
assert.Equal(t, uint64(96), blockL1CommitCalldataSize)
blockL1CommitGas, err = EstimateBlockL1CommitGas(block3)
assert.NoError(t, err)
assert.Equal(t, uint64(4187), blockL1CommitGas)
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block4)
assert.NoError(t, err)
assert.Equal(t, uint64(60), blockL1CommitCalldataSize)
blockL1CommitGas, err = EstimateBlockL1CommitGas(block4)
assert.NoError(t, err)
assert.Equal(t, uint64(14020), blockL1CommitGas)
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block5)
assert.NoError(t, err)
assert.Equal(t, uint64(60), blockL1CommitCalldataSize)
blockL1CommitGas, err = EstimateBlockL1CommitGas(block5)
assert.NoError(t, err)
assert.Equal(t, uint64(8796), blockL1CommitGas)
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block6)
assert.NoError(t, err)
assert.Equal(t, uint64(60), blockL1CommitCalldataSize)
blockL1CommitGas, err = EstimateBlockL1CommitGas(block6)
assert.NoError(t, err)
assert.Equal(t, uint64(6184), blockL1CommitGas)
// Test case: when the batch and chunk contain one block.
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block1},
}
chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(298), chunkL1CommitCalldataSize)
chunkL1CommitGas, err := EstimateChunkL1CommitGas(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(6042), chunkL1CommitGas)
daChunk, err := NewDAChunk(chunk, 0)
assert.NoError(t, err)
chunkBytes, err := daChunk.Encode()
assert.NoError(t, err)
chunkHexString := hex.EncodeToString(chunkBytes)
assert.Equal(t, 299, len(chunkBytes))
assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000001de9000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", chunkHexString)
daChunkHash, err := daChunk.Hash()
assert.NoError(t, err)
assert.Equal(t, common.HexToHash("0xde642c68122634b33fa1e6e4243b17be3bfd0dc6f996f204ef6d7522516bd840"), daChunkHash)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: parentBatchHash,
Chunks: []*encoding.Chunk{chunk},
}
batchL1CommitCalldataSize, err := EstimateBatchL1CommitCalldataSize(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(298), batchL1CommitCalldataSize)
batchL1CommitGas, err := EstimateBatchL1CommitGas(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(162591), batchL1CommitGas)
daBatch, err := NewDABatch(batch)
assert.NoError(t, err)
batchBytes := daBatch.Encode()
batchHexString := hex.EncodeToString(batchBytes)
assert.Equal(t, 89, len(batchBytes))
assert.Equal(t, "000000000000000001000000000000000000000000000000008fbc5eecfefc5bd9d1618ecef1fed160a7838448383595a2257d4c9bd5c5fa3eb0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab0", batchHexString)
assert.Equal(t, 0, len(daBatch.SkippedL1MessageBitmap))
assert.Equal(t, uint64(0), daBatch.TotalL1MessagePopped)
assert.Equal(t, uint64(0), daBatch.L1MessagePopped)
assert.Equal(t, common.HexToHash("0xa906c7d2b6b68ea5fec3ff9d60d41858676e0d365e5d5ef07b2ce20fcf24ecd7"), daBatch.Hash())
decodedDABatch, err := NewDABatchFromBytes(batchBytes)
assert.NoError(t, err)
decodedBatchBytes := decodedDABatch.Encode()
decodedBatchHexString := hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)
// Test case: when the batch and chunk contain two blocks.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block1, block2},
}
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(6043), chunkL1CommitCalldataSize)
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(100742), chunkL1CommitGas)
daChunk, err = NewDAChunk(chunk, 0)
assert.NoError(t, err)
chunkBytes, err = daChunk.Encode()
assert.NoError(t, err)
assert.Equal(t, 6044, len(chunkBytes))
daChunkHash, err = daChunk.Hash()
assert.NoError(t, err)
assert.Equal(t, common.HexToHash("0x014916a83eccdb0d01e814b4d4ab90eb9049ba9a3cb0994919b86ad873bcd028"), daChunkHash)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: parentBatchHash,
Chunks: []*encoding.Chunk{chunk},
}
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(6043), batchL1CommitCalldataSize)
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(257897), batchL1CommitGas)
daBatch, err = NewDABatch(batch)
assert.NoError(t, err)
batchBytes = daBatch.Encode()
batchHexString = hex.EncodeToString(batchBytes)
assert.Equal(t, 89, len(batchBytes))
assert.Equal(t, "0000000000000000010000000000000000000000000000000074dd561a36921590926bee01fd0d53747c5f3e48e48a2d5538b9ab0e1511cfd7b0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab0", batchHexString)
assert.Equal(t, 0, len(daBatch.SkippedL1MessageBitmap))
assert.Equal(t, uint64(0), daBatch.TotalL1MessagePopped)
assert.Equal(t, uint64(0), daBatch.L1MessagePopped)
assert.Equal(t, common.HexToHash("0xb02e39b740756824d20b2cac322ac365121411ced9d6e34de98a0b247c6e23e6"), daBatch.Hash())
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
assert.NoError(t, err)
decodedBatchBytes = decodedDABatch.Encode()
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)
// Test case: when the chunk contains one block with 1 L1MsgTx.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block3},
}
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(96), chunkL1CommitCalldataSize)
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(5329), chunkL1CommitGas)
daChunk, err = NewDAChunk(chunk, 0)
assert.NoError(t, err)
chunkBytes, err = daChunk.Encode()
assert.NoError(t, err)
chunkHexString = hex.EncodeToString(chunkBytes)
assert.Equal(t, 97, len(chunkBytes))
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", chunkHexString)
daChunkHash, err = daChunk.Hash()
assert.NoError(t, err)
assert.Equal(t, common.HexToHash("0x9e643c8a9203df542e39d9bfdcb07c99575b3c3d557791329fef9d83cc4147d0"), daChunkHash)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: parentBatchHash,
Chunks: []*encoding.Chunk{chunk},
}
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(96), batchL1CommitCalldataSize)
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(161889), batchL1CommitGas)
daBatch, err = NewDABatch(batch)
assert.NoError(t, err)
batchBytes = daBatch.Encode()
batchHexString = hex.EncodeToString(batchBytes)
assert.Equal(t, 121, len(batchBytes))
assert.Equal(t, "000000000000000001000000000000000b000000000000000b34f419ce7e882295bdb5aec6cce56ffa788a5fed4744d7fbd77e4acbf409f1cab0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab000000000000000000000000000000000000000000000000000000000000003ff", batchHexString)
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
expectedBitmap := "00000000000000000000000000000000000000000000000000000000000003ff"
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
assert.Equal(t, uint64(11), daBatch.TotalL1MessagePopped)
assert.Equal(t, uint64(11), daBatch.L1MessagePopped)
assert.Equal(t, common.HexToHash("0xa18f07cb56ab4f2db5914d9b5699c5932bea4b5c73e71c8cec79151c11e9e986"), daBatch.Hash())
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
assert.NoError(t, err)
decodedBatchBytes = decodedDABatch.Encode()
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)
// Test case: batch contains multiple chunks, chunk contains multiple blocks.
chunk1 := &encoding.Chunk{
Blocks: []*encoding.Block{block1, block2, block3},
}
chunk1L1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk1)
assert.NoError(t, err)
assert.Equal(t, uint64(6139), chunk1L1CommitCalldataSize)
chunk1L1CommitGas, err := EstimateChunkL1CommitGas(chunk1)
assert.NoError(t, err)
assert.Equal(t, uint64(106025), chunk1L1CommitGas)
daChunk1, err := NewDAChunk(chunk1, 0)
assert.NoError(t, err)
chunkBytes1, err := daChunk1.Encode()
assert.NoError(t, err)
assert.Equal(t, 6140, len(chunkBytes1))
chunk2 := &encoding.Chunk{
Blocks: []*encoding.Block{block4},
}
chunk2L1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk2)
assert.NoError(t, err)
assert.Equal(t, uint64(60), chunk2L1CommitCalldataSize)
chunk2L1CommitGas, err := EstimateChunkL1CommitGas(chunk2)
assert.NoError(t, err)
assert.Equal(t, uint64(15189), chunk2L1CommitGas)
daChunk2, err := NewDAChunk(chunk2, 0)
assert.NoError(t, err)
chunkBytes2, err := daChunk2.Encode()
assert.NoError(t, err)
assert.Equal(t, 61, len(chunkBytes2))
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: parentBatchHash,
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(6199), batchL1CommitCalldataSize)
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(279054), batchL1CommitGas)
daBatch, err = NewDABatch(batch)
assert.NoError(t, err)
batchBytes = daBatch.Encode()
batchHexString = hex.EncodeToString(batchBytes)
assert.Equal(t, 121, len(batchBytes))
assert.Equal(t, "000000000000000001000000000000002a000000000000002a1f9b3d942a6ee14e7afc52225c91fa44faa0a7ec511df9a2d9348d33bcd142fcb0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab00000000000000000000000000000000000000000000000000000001ffffffbff", batchHexString)
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
expectedBitmap = "0000000000000000000000000000000000000000000000000000001ffffffbff"
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
assert.Equal(t, uint64(42), daBatch.TotalL1MessagePopped)
assert.Equal(t, uint64(42), daBatch.L1MessagePopped)
assert.Equal(t, common.HexToHash("0xf7bd6afe02764e4e6df23a374d753182b57fa77be71aaf1cd8365e15a51872d1"), daBatch.Hash())
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
assert.NoError(t, err)
decodedBatchBytes = decodedDABatch.Encode()
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)
// Test case: many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block4},
}
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(60), chunkL1CommitCalldataSize)
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(15189), chunkL1CommitGas)
daChunk, err = NewDAChunk(chunk, 0)
assert.NoError(t, err)
chunkBytes, err = daChunk.Encode()
assert.NoError(t, err)
assert.Equal(t, 61, len(chunkBytes))
daChunkHash, err = daChunk.Hash()
assert.NoError(t, err)
assert.Equal(t, common.HexToHash("0x854fc3136f47ce482ec85ee3325adfa16a1a1d60126e1c119eaaf0c3a9e90f8e"), daChunkHash)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 37,
ParentBatchHash: parentBatchHash,
Chunks: []*encoding.Chunk{chunk},
}
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(60), batchL1CommitCalldataSize)
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(171730), batchL1CommitGas)
daBatch, err = NewDABatch(batch)
assert.NoError(t, err)
batchBytes = daBatch.Encode()
batchHexString = hex.EncodeToString(batchBytes)
assert.Equal(t, 121, len(batchBytes))
assert.Equal(t, "0000000000000000010000000000000005000000000000002ac62fb58ec2d5393e00960f1cc23cab883b685296efa03d13ea2dd4c6de79cc55b0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab00000000000000000000000000000000000000000000000000000000000000000", batchHexString)
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
expectedBitmap = "0000000000000000000000000000000000000000000000000000000000000000"
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
assert.Equal(t, uint64(42), daBatch.TotalL1MessagePopped)
assert.Equal(t, uint64(5), daBatch.L1MessagePopped)
assert.Equal(t, common.HexToHash("0x841f4657b7eb723cae35377cf2963b51191edad6a3b182d4c8524cb928d2a413"), daBatch.Hash())
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
assert.NoError(t, err)
decodedBatchBytes = decodedDABatch.Encode()
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)
// Test case: many consecutive L1 Msgs in 1 bitmap, with leading skipped msgs.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block4},
}
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(60), chunkL1CommitCalldataSize)
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(15189), chunkL1CommitGas)
daChunk, err = NewDAChunk(chunk, 0)
assert.NoError(t, err)
chunkBytes, err = daChunk.Encode()
assert.NoError(t, err)
assert.Equal(t, 61, len(chunkBytes))
daChunkHash, err = daChunk.Hash()
assert.NoError(t, err)
assert.Equal(t, common.HexToHash("0x854fc3136f47ce482ec85ee3325adfa16a1a1d60126e1c119eaaf0c3a9e90f8e"), daChunkHash)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: parentBatchHash,
Chunks: []*encoding.Chunk{chunk},
}
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(60), batchL1CommitCalldataSize)
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(171810), batchL1CommitGas)
daBatch, err = NewDABatch(batch)
assert.NoError(t, err)
batchBytes = daBatch.Encode()
batchHexString = hex.EncodeToString(batchBytes)
assert.Equal(t, 121, len(batchBytes))
assert.Equal(t, "000000000000000001000000000000002a000000000000002a93255aa24dd468c5645f1e6901b8131a7a78a0eeb2a17cbb09ba64688a8de6b4b0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab00000000000000000000000000000000000000000000000000000001fffffffff", batchHexString)
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
expectedBitmap = "0000000000000000000000000000000000000000000000000000001fffffffff"
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
assert.Equal(t, uint64(42), daBatch.TotalL1MessagePopped)
assert.Equal(t, uint64(42), daBatch.L1MessagePopped)
assert.Equal(t, common.HexToHash("0xa28766a3617cf244cc397fc4ce4c23022ec80f152b9f618807ac7e7c11486612"), daBatch.Hash())
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
assert.NoError(t, err)
decodedBatchBytes = decodedDABatch.Encode()
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)
// Test case: many sparse L1 Msgs in 1 bitmap.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block5},
}
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(60), chunkL1CommitCalldataSize)
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(9947), chunkL1CommitGas)
daChunk, err = NewDAChunk(chunk, 0)
assert.NoError(t, err)
chunkBytes, err = daChunk.Encode()
assert.NoError(t, err)
assert.Equal(t, 61, len(chunkBytes))
daChunkHash, err = daChunk.Hash()
assert.NoError(t, err)
assert.Equal(t, common.HexToHash("0x2aa220ca7bd1368e59e8053eb3831e30854aa2ec8bd3af65cee350c1c0718ba6"), daChunkHash)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: parentBatchHash,
Chunks: []*encoding.Chunk{chunk},
}
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(60), batchL1CommitCalldataSize)
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(166504), batchL1CommitGas)
daBatch, err = NewDABatch(batch)
assert.NoError(t, err)
batchBytes = daBatch.Encode()
batchHexString = hex.EncodeToString(batchBytes)
assert.Equal(t, 121, len(batchBytes))
assert.Equal(t, "000000000000000001000000000000000a000000000000000ac7bcc8da943dd83404e84d9ce7e894ab97ce4829df4eb51ebbbe13c90b5a3f4db0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab000000000000000000000000000000000000000000000000000000000000001dd", batchHexString)
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
expectedBitmap = "00000000000000000000000000000000000000000000000000000000000001dd"
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
assert.Equal(t, uint64(10), daBatch.TotalL1MessagePopped)
assert.Equal(t, uint64(10), daBatch.L1MessagePopped)
assert.Equal(t, common.HexToHash("0x2fee2073639eb9795007f7e765b3318f92658822de40b2134d34a478a0e9058a"), daBatch.Hash())
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
assert.NoError(t, err)
decodedBatchBytes = decodedDABatch.Encode()
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)
// Test case: many L1 Msgs in each of 2 bitmaps.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block6},
}
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(60), chunkL1CommitCalldataSize)
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
assert.NoError(t, err)
assert.Equal(t, uint64(7326), chunkL1CommitGas)
daChunk, err = NewDAChunk(chunk, 0)
assert.NoError(t, err)
chunkBytes, err = daChunk.Encode()
assert.NoError(t, err)
assert.Equal(t, 61, len(chunkBytes))
daChunkHash, err = daChunk.Hash()
assert.NoError(t, err)
assert.Equal(t, common.HexToHash("0xb65521bea7daff75838de07951c3c055966750fb5a270fead5e0e727c32455c3"), daChunkHash)
batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: parentBatchHash,
Chunks: []*encoding.Chunk{chunk},
}
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(60), batchL1CommitCalldataSize)
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
assert.NoError(t, err)
assert.Equal(t, uint64(164388), batchL1CommitGas)
daBatch, err = NewDABatch(batch)
assert.NoError(t, err)
batchBytes = daBatch.Encode()
batchHexString = hex.EncodeToString(batchBytes)
assert.Equal(t, 153, len(batchBytes))
assert.Equal(t, "00000000000000000100000000000001010000000000000101899a411a3309c6491701b7b955c7b1115ac015414bbb71b59a0ca561668d5208b0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd0000000000000000000000000000000000000000000000000000000000000000", batchHexString)
assert.Equal(t, 64, len(daBatch.SkippedL1MessageBitmap))
expectedBitmap = "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd0000000000000000000000000000000000000000000000000000000000000000"
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
assert.Equal(t, uint64(257), daBatch.TotalL1MessagePopped)
assert.Equal(t, uint64(257), daBatch.L1MessagePopped)
assert.Equal(t, common.HexToHash("0x84206bc6d0076a233fc7120a0bec4e03bf2512207437768828384dddb335ba2e"), daBatch.Hash())
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
assert.NoError(t, err)
decodedBatchBytes = decodedDABatch.Encode()
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)
}
func TestErrorPaths(t *testing.T) {
// Test case: when the chunk is nil.
_, err := NewDAChunk(nil, 100)
assert.Error(t, err)
assert.Contains(t, err.Error(), "chunk is nil")
// Test case: when the chunk contains no blocks.
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{},
}
_, err = NewDAChunk(chunk, 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of blocks is 0")
// Test case: when the chunk contains more than 255 blocks.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{},
}
for i := 0; i < 256; i++ {
chunk.Blocks = append(chunk.Blocks, &encoding.Block{})
}
_, err = NewDAChunk(chunk, 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")
// Test case: Header.Number is not a uint64.
block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
block.Header.Number = new(big.Int).Lsh(block.Header.Number, 64)
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
_, err = NewDAChunk(chunk, 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "block number is not uint64")
// Test case: number of transactions exceeds max uint16.
block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
for i := 0; i < 65537; i++ {
block.Transactions = append(block.Transactions, block.Transactions[0])
}
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
_, err = NewDAChunk(chunk, 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
// Test case: decoding a transaction fails when its data is a hex string without the 0x prefix.
block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
block.Transactions = block.Transactions[:1]
block.Transactions[0].Data = "not-a-hex"
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
_, err = EstimateChunkL1CommitCalldataSize(chunk)
assert.Error(t, err)
assert.Contains(t, err.Error(), "hex string without 0x prefix")
_, err = EstimateChunkL1CommitGas(chunk)
assert.Error(t, err)
assert.Contains(t, err.Error(), "hex string without 0x prefix")
// Test case: number of L1 messages exceeds max uint16.
block = readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
for i := 0; i < 65535; i++ {
tx := &block.Transactions[i]
txCopy := *tx
txCopy.Nonce = uint64(i + 1)
block.Transactions = append(block.Transactions, txCopy)
}
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
_, err = NewDAChunk(chunk, 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
}
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
data, err := os.ReadFile(filename)
assert.NoError(t, err)
block := &encoding.Block{}
assert.NoError(t, json.Unmarshal(data, block))
return block
}

View File

@@ -1,508 +0,0 @@
package codecv1
import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"math/big"
"strings"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/encoding"
)
var (
// BLSModulus is the BLS modulus defined in EIP-4844.
BLSModulus *big.Int
// BlobDataProofArgs defines the argument types for `_blobDataProof` in `finalizeBatchWithProof4844`.
BlobDataProofArgs abi.Arguments
// MaxNumChunks is the maximum number of chunks that a batch can contain.
MaxNumChunks int = 15
)
func init() {
// initialize modulus
modulus, success := new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)
if !success {
log.Crit("BLSModulus conversion failed")
}
BLSModulus = modulus
// initialize arguments
bytes32Type, err1 := abi.NewType("bytes32", "bytes32", nil)
bytes48Type, err2 := abi.NewType("bytes48", "bytes48", nil)
if err1 != nil || err2 != nil {
log.Crit("Failed to initialize abi types", "err1", err1, "err2", err2)
}
BlobDataProofArgs = abi.Arguments{
{Type: bytes32Type, Name: "z"},
{Type: bytes32Type, Name: "y"},
{Type: bytes48Type, Name: "commitment"},
{Type: bytes48Type, Name: "proof"},
}
}
// CodecV1Version denotes the version of the codec.
const CodecV1Version = 1
// DABlock represents a Data Availability Block.
type DABlock struct {
BlockNumber uint64
Timestamp uint64
BaseFee *big.Int
GasLimit uint64
NumTransactions uint16
NumL1Messages uint16
}
// DAChunk groups consecutive DABlocks with their transactions.
type DAChunk struct {
Blocks []*DABlock
Transactions [][]*types.TransactionData
}
// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
// header
Version uint8
BatchIndex uint64
L1MessagePopped uint64
TotalL1MessagePopped uint64
DataHash common.Hash
BlobVersionedHash common.Hash
ParentBatchHash common.Hash
SkippedL1MessageBitmap []byte
// blob payload
blob *kzg4844.Blob
z *kzg4844.Point
}
// NewDABlock creates a new DABlock from the given encoding.Block and the total number of L1 messages popped before.
func NewDABlock(block *encoding.Block, totalL1MessagePoppedBefore uint64) (*DABlock, error) {
if !block.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
// note: numL1Messages includes skipped messages
numL1Messages := block.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
// note: numTransactions includes skipped messages
numL2Transactions := block.NumL2Transactions()
numTransactions := numL1Messages + numL2Transactions
if numTransactions > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
daBlock := DABlock{
BlockNumber: block.Header.Number.Uint64(),
Timestamp: block.Header.Time,
BaseFee: block.Header.BaseFee,
GasLimit: block.Header.GasLimit,
NumTransactions: uint16(numTransactions),
NumL1Messages: uint16(numL1Messages),
}
return &daBlock, nil
}
// Encode serializes the DABlock into a slice of bytes.
func (b *DABlock) Encode() []byte {
bytes := make([]byte, 60)
binary.BigEndian.PutUint64(bytes[0:], b.BlockNumber)
binary.BigEndian.PutUint64(bytes[8:], b.Timestamp)
if b.BaseFee != nil {
binary.BigEndian.PutUint64(bytes[40:], b.BaseFee.Uint64())
}
binary.BigEndian.PutUint64(bytes[48:], b.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], b.NumTransactions)
binary.BigEndian.PutUint16(bytes[58:], b.NumL1Messages)
return bytes
}
// NewDAChunk creates a new DAChunk from the given encoding.Chunk and the total number of L1 messages popped before.
func NewDAChunk(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (*DAChunk, error) {
var blocks []*DABlock
var txs [][]*types.TransactionData
for _, block := range chunk.Blocks {
b, err := NewDABlock(block, totalL1MessagePoppedBefore)
if err != nil {
return nil, err
}
blocks = append(blocks, b)
totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
txs = append(txs, block.Transactions)
}
daChunk := DAChunk{
Blocks: blocks,
Transactions: txs,
}
return &daChunk, nil
}
// Encode serializes the DAChunk into a slice of bytes.
func (c *DAChunk) Encode() []byte {
var chunkBytes []byte
chunkBytes = append(chunkBytes, byte(len(c.Blocks)))
for _, block := range c.Blocks {
blockBytes := block.Encode()
chunkBytes = append(chunkBytes, blockBytes...)
}
return chunkBytes
}
// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
var dataBytes []byte
// concatenate block contexts
for _, block := range c.Blocks {
encodedBlock := block.Encode()
// only the first 58 bytes are used in the hashing process
dataBytes = append(dataBytes, encodedBlock[:58]...)
}
// concatenate l1 tx hashes
for _, blockTxs := range c.Transactions {
for _, txData := range blockTxs {
if txData.Type == types.L1MessageTxType {
txHash := strings.TrimPrefix(txData.TxHash, "0x")
hashBytes, err := hex.DecodeString(txHash)
if err != nil {
return common.Hash{}, err
}
if len(hashBytes) != 32 {
return common.Hash{}, fmt.Errorf("unexpected hash: %s", txData.TxHash)
}
dataBytes = append(dataBytes, hashBytes...)
}
}
}
hash := crypto.Keccak256Hash(dataBytes)
return hash, nil
}
// NewDABatch creates a DABatch from the provided encoding.Batch.
func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
// this encoding can only support a fixed number of chunks per batch
if len(batch.Chunks) > MaxNumChunks {
return nil, fmt.Errorf("too many chunks in batch")
}
if len(batch.Chunks) == 0 {
return nil, fmt.Errorf("too few chunks in batch")
}
// batch data hash
dataHash, err := computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, err
}
// skipped L1 messages bitmap
bitmapBytes, totalL1MessagePoppedAfter, err := encoding.ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, err
}
// blob payload
blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
if err != nil {
return nil, err
}
daBatch := DABatch{
Version: CodecV1Version,
BatchIndex: batch.Index,
L1MessagePopped: totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore,
TotalL1MessagePopped: totalL1MessagePoppedAfter,
DataHash: dataHash,
BlobVersionedHash: blobVersionedHash,
ParentBatchHash: batch.ParentBatchHash,
SkippedL1MessageBitmap: bitmapBytes,
blob: blob,
z: z,
}
return &daBatch, nil
}
// computeBatchDataHash computes the data hash of the batch.
// Note: The batch hash and batch data hash are two different hashes,
// the former is used for identifying a batch in the contracts,
// the latter is used in the public input to the provers.
func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore uint64) (common.Hash, error) {
var dataBytes []byte
totalL1MessagePoppedBeforeChunk := totalL1MessagePoppedBefore
for _, chunk := range chunks {
daChunk, err := NewDAChunk(chunk, totalL1MessagePoppedBeforeChunk)
if err != nil {
return common.Hash{}, err
}
totalL1MessagePoppedBeforeChunk += chunk.NumL1Messages(totalL1MessagePoppedBeforeChunk)
chunkHash, err := daChunk.Hash()
if err != nil {
return common.Hash{}, err
}
dataBytes = append(dataBytes, chunkHash.Bytes()...)
}
dataHash := crypto.Keccak256Hash(dataBytes)
return dataHash, nil
}
// constructBlobPayload constructs the 4844 blob payload.
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
metadataLength := 2 + MaxNumChunks*4
// the raw (un-padded) blob payload
blobBytes := make([]byte, metadataLength)
// challenge digest preimage
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
// the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash
// blob metadata: num_chunks
binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunks)))
// encode blob metadata and L2 transactions,
// and simultaneously also build challenge preimage
for chunkID, chunk := range chunks {
currentChunkStartIndex := len(blobBytes)
for _, block := range chunk.Blocks {
for _, tx := range block.Transactions {
if tx.Type != types.L1MessageTxType {
// encode L2 txs into blob payload
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return nil, common.Hash{}, nil, err
}
blobBytes = append(blobBytes, rlpTxData...)
}
}
}
// blob metadata: chunki_size
if chunkSize := len(blobBytes) - currentChunkStartIndex; chunkSize != 0 {
binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
}
// challenge: compute chunk data hash
chunkDataHash = crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}
// if we have fewer than MaxNumChunks chunks, the rest
// of the blob metadata is correctly initialized to 0,
// but we need to add padding to the challenge preimage
for chunkID := len(chunks); chunkID < MaxNumChunks; chunkID++ {
// use the last chunk's data hash as padding
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}
// challenge: compute metadata hash
hash := crypto.Keccak256Hash(blobBytes[0:metadataLength])
copy(challengePreimage[0:], hash[:])
// convert raw data to BLSFieldElements
blob, err := makeBlobCanonical(blobBytes)
if err != nil {
return nil, common.Hash{}, nil, err
}
// compute blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
// challenge: append blob versioned hash
copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
// compute z = challenge_digest % BLS_MODULUS
challengeDigest := crypto.Keccak256Hash(challengePreimage)
pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
pointBytes := pointBigInt.Bytes()
// the challenge point z
var z kzg4844.Point
start := 32 - len(pointBytes)
copy(z[start:], pointBytes)
return blob, blobVersionedHash, &z, nil
}
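The challenge point z above is simply keccak256 of the assembled preimage (the metadata hash, the MaxNumChunks chunk data hashes, then the blob versioned hash) reduced modulo the BLS scalar field. A minimal standalone sketch of that reduction, reusing the same modulus value as BLSModulus and a placeholder preimage:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/crypto"
)

// blsModulus is the BLS12-381 scalar field modulus from EIP-4844 (the same
// value as BLSModulus used by the codec above).
var blsModulus, _ = new(big.Int).SetString(
	"52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

// challengePoint reduces keccak256(preimage) modulo the BLS modulus and
// left-pads the result to 32 bytes, mirroring how z is derived above.
func challengePoint(preimage []byte) [32]byte {
	digest := crypto.Keccak256Hash(preimage)
	reduced := new(big.Int).Mod(new(big.Int).SetBytes(digest[:]), blsModulus)
	var z [32]byte
	reduced.FillBytes(z[:]) // big-endian, zero-padded on the left
	return z
}

func main() {
	// Placeholder preimage; the real one is keccak(metadata) || chunk data
	// hashes (padded to MaxNumChunks) || blob versioned hash.
	z := challengePoint([]byte("example challenge preimage"))
	fmt.Printf("z = %x\n", z)
}
```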
// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
func makeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
// blob contains 131072 bytes but we can only utilize 31/32 of these
if len(blobBytes) > 126976 {
return nil, fmt.Errorf("oversized batch payload")
}
// the canonical (padded) blob payload
var blob kzg4844.Blob
// encode blob payload by prepending every 31 bytes with 1 zero byte
index := 0
for from := 0; from < len(blobBytes); from += 31 {
to := from + 31
if to > len(blobBytes) {
to = len(blobBytes)
}
copy(blob[index+1:], blobBytes[from:to])
index += 32
}
return &blob, nil
}
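Since each 32-byte BLS field element must stay below the modulus, the packing above leaves the first byte of every element zero, which is why only 31/32 of the 131072-byte blob (126976 bytes) is usable. A standalone sketch of the same packing using plain byte slices instead of kzg4844.Blob:

```go
package main

import "fmt"

// packCanonical mirrors the padding scheme in makeBlobCanonical: every 31 raw
// bytes become one 32-byte field element whose leading byte is zero, keeping
// each element below the BLS modulus.
func packCanonical(raw []byte) ([]byte, error) {
	if len(raw) > 4096*31 { // 126976 bytes: the usable capacity of a blob
		return nil, fmt.Errorf("oversized payload: %d bytes", len(raw))
	}
	out := make([]byte, 4096*32) // 131072 bytes, zero-initialized
	for from, index := 0, 0; from < len(raw); from, index = from+31, index+32 {
		to := from + 31
		if to > len(raw) {
			to = len(raw)
		}
		copy(out[index+1:], raw[from:to]) // out[index] stays 0x00
	}
	return out, nil
}

func main() {
	packed, _ := packCanonical(make([]byte, 62))    // exactly two 31-byte groups
	fmt.Println(len(packed), packed[0], packed[32]) // 131072 0 0
}
```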
// NewDABatchFromBytes attempts to decode the given byte slice into a DABatch.
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
if len(data) < 121 {
return nil, fmt.Errorf("insufficient data for DABatch, expected at least 121 bytes but got %d", len(data))
}
b := &DABatch{
Version: data[0],
BatchIndex: binary.BigEndian.Uint64(data[1:9]),
L1MessagePopped: binary.BigEndian.Uint64(data[9:17]),
TotalL1MessagePopped: binary.BigEndian.Uint64(data[17:25]),
DataHash: common.BytesToHash(data[25:57]),
BlobVersionedHash: common.BytesToHash(data[57:89]),
ParentBatchHash: common.BytesToHash(data[89:121]),
SkippedL1MessageBitmap: data[121:],
}
return b, nil
}
// Encode serializes the DABatch into bytes.
func (b *DABatch) Encode() []byte {
batchBytes := make([]byte, 121+len(b.SkippedL1MessageBitmap))
batchBytes[0] = b.Version
binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex)
binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped)
binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped)
copy(batchBytes[25:], b.DataHash[:])
copy(batchBytes[57:], b.BlobVersionedHash[:])
copy(batchBytes[89:], b.ParentBatchHash[:])
copy(batchBytes[121:], b.SkippedL1MessageBitmap[:])
return batchBytes
}
// Hash computes the hash of the serialized DABatch.
func (b *DABatch) Hash() common.Hash {
bytes := b.Encode()
return crypto.Keccak256Hash(bytes)
}
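For reference, the fixed 121-byte header serialized by Encode and parsed by NewDABatchFromBytes lays out as: version (1 byte), batch index (8), L1 messages popped in this batch (8), total L1 messages popped (8), data hash (32), blob versioned hash (32), parent batch hash (32), followed by the variable-length skipped-L1-message bitmap. A small sketch of that layout using only the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Byte offsets of the fixed 121-byte DABatch header; the skipped-L1-message
// bitmap follows at offset 121.
const (
	offVersion              = 0  // 1 byte
	offBatchIndex           = 1  // 8 bytes
	offL1MessagePopped      = 9  // 8 bytes
	offTotalL1MessagePopped = 17 // 8 bytes
	offDataHash             = 25 // 32 bytes
	offBlobVersionedHash    = 57 // 32 bytes
	offParentBatchHash      = 89 // 32 bytes
	fixedHeaderLen          = 121
)

func main() {
	header := make([]byte, fixedHeaderLen)
	header[offVersion] = 1 // CodecV1
	binary.BigEndian.PutUint64(header[offBatchIndex:], 42)
	fmt.Println(header[offVersion], binary.BigEndian.Uint64(header[offBatchIndex:])) // 1 42
}
```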
// BlobDataProof computes the abi-encoded blob verification data.
func (b *DABatch) BlobDataProof() ([]byte, error) {
if b.blob == nil {
return nil, errors.New("called BlobDataProof with empty blob")
}
if b.z == nil {
return nil, errors.New("called BlobDataProof with empty z")
}
commitment, err := kzg4844.BlobToCommitment(*b.blob)
if err != nil {
return nil, fmt.Errorf("failed to create blob commitment")
}
proof, y, err := kzg4844.ComputeProof(*b.blob, *b.z)
if err != nil {
log.Crit("failed to create KZG proof at point", "err", err, "z", hex.EncodeToString(b.z[:]))
}
// Memory layout of ``_blobDataProof``:
// | z | y | kzg_commitment | kzg_proof |
// |---------|---------|----------------|-----------|
// | bytes32 | bytes32 | bytes48 | bytes48 |
values := []interface{}{*b.z, y, commitment, proof}
return BlobDataProofArgs.Pack(values...)
}
// Blob returns the blob of the batch.
func (b *DABatch) Blob() *kzg4844.Blob {
return b.blob
}
// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
// TODO: implement this function.
return nil, nil, nil
}
// EstimateChunkL1CommitBlobSize estimates the size of the L1 commit blob for a single chunk.
func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
metadataSize := uint64(2 + 4*MaxNumChunks) // over-estimate: adding metadata length
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
if err != nil {
return 0, err
}
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
return paddedSize, nil
}
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
metadataSize := uint64(2 + 4*MaxNumChunks)
var batchDataSize uint64
for _, c := range b.Chunks {
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
if err != nil {
return 0, err
}
batchDataSize += chunkDataSize
}
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
return paddedSize, nil
}
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
var dataSize uint64
for _, block := range c.Blocks {
for _, tx := range block.Transactions {
if tx.Type != types.L1MessageTxType {
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return 0, err
}
dataSize += uint64(len(rlpTxData))
}
}
}
return dataSize, nil
}
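Both estimators above share the same padding arithmetic: the 2 + 4*MaxNumChunks metadata bytes plus the RLP payload are packed 31 bytes per 32-byte field element, so the padded size is ceil(size/31)*32. A quick worked sketch; MaxNumChunks = 15 is an illustrative assumption, the real value is the codec's constant:

```go
package main

import "fmt"

// estimateBlobSize mirrors the padding arithmetic of the estimators above:
// metadata (2 + 4*maxNumChunks bytes) plus the RLP payload, packed 31 bytes
// per 32-byte field element.
func estimateBlobSize(maxNumChunks, payloadSize uint64) uint64 {
	metadataSize := 2 + 4*maxNumChunks
	return ((metadataSize + payloadSize + 30) / 31) * 32 // ceil(size/31) * 32
}

func main() {
	// 100,000 bytes of RLP with 15 chunk slots:
	// ceil((62 + 100000) / 31) = 3228 field elements -> 103296 blob bytes.
	fmt.Println(estimateBlobSize(15, 100000)) // 103296
}
```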

File diff suppressed because one or more lines are too long

View File

@@ -1,237 +0,0 @@
package encoding
import (
"fmt"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
)
// CodecVersion defines the version of encoder and decoder.
type CodecVersion int
const (
// CodecV0 represents the version 0 of the encoder and decoder.
CodecV0 CodecVersion = iota
// CodecV1 represents the version 1 of the encoder and decoder.
CodecV1
// txTypeTest is a special transaction type used in unit tests.
txTypeTest = 0xff
)
func init() {
// make sure txTypeTest will not interfere with other transaction types
if txTypeTest == types.LegacyTxType || txTypeTest == types.AccessListTxType || txTypeTest == types.DynamicFeeTxType || txTypeTest == types.BlobTxType || txTypeTest == types.L1MessageTxType {
log.Crit("txTypeTest is overlapping with existing transaction types")
}
}
// Block represents an L2 block.
type Block struct {
Header *types.Header
Transactions []*types.TransactionData
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption,omitempty"`
}
// Chunk represents a group of blocks.
type Chunk struct {
Blocks []*Block `json:"blocks"`
}
// Batch represents a batch of chunks.
type Batch struct {
Index uint64
TotalL1MessagePoppedBefore uint64
ParentBatchHash common.Hash
Chunks []*Chunk
}
// NumL1Messages returns the number of L1 messages in this block.
// This number is the sum of included and skipped L1 messages.
func (b *Block) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var lastQueueIndex *uint64
for _, txData := range b.Transactions {
if txData.Type == types.L1MessageTxType {
lastQueueIndex = &txData.Nonce
}
}
if lastQueueIndex == nil {
return 0
}
// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
// TODO: cache results
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
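The subtraction above counts every queue index from totalL1MessagePoppedBefore up to the block's last L1 message, so skipped messages are counted as well. A tiny worked sketch of that arithmetic:

```go
package main

import "fmt"

// numL1Messages reproduces the arithmetic above: if the queue indices popped
// before this block are 0..totalPoppedBefore-1 and the block's last L1 message
// has queue index lastQueueIndex, the block accounts for every index in
// between, whether the message was included or skipped.
func numL1Messages(lastQueueIndex, totalPoppedBefore uint64) uint64 {
	return lastQueueIndex - totalPoppedBefore + 1
}

func main() {
	// 5 messages popped before (indices 0..4), last message in the block has
	// queue index 9 -> the block covers indices 5..9, i.e. 5 messages.
	fmt.Println(numL1Messages(9, 5)) // 5
}
```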
// NumL2Transactions returns the number of L2 transactions in this block.
func (b *Block) NumL2Transactions() uint64 {
var count uint64
for _, txData := range b.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
// NumL1Messages returns the number of L1 messages in this chunk.
// This number is the sum of included and skipped L1 messages.
func (c *Chunk) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
var numL1Messages uint64
for _, block := range c.Blocks {
numL1MessagesInBlock := block.NumL1Messages(totalL1MessagePoppedBefore)
numL1Messages += numL1MessagesInBlock
totalL1MessagePoppedBefore += numL1MessagesInBlock
}
// TODO: cache results
return numL1Messages
}
// ConvertTxDataToRLPEncoding converts a *types.TransactionData into its RLP-encoded transaction bytes.
func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, fmt.Errorf("failed to decode txData.Data: data=%v, err=%w", txData.Data, err)
}
var tx *types.Transaction
switch txData.Type {
case types.LegacyTxType:
tx = types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
case types.AccessListTxType:
tx = types.NewTx(&types.AccessListTx{
ChainID: txData.ChainId.ToInt(),
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
AccessList: txData.AccessList,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
case types.DynamicFeeTxType:
tx = types.NewTx(&types.DynamicFeeTx{
ChainID: txData.ChainId.ToInt(),
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasTipCap: txData.GasTipCap.ToInt(),
GasFeeCap: txData.GasFeeCap.ToInt(),
Data: data,
AccessList: txData.AccessList,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
case txTypeTest:
// in the tests, we simply use `data` as the RLP-encoded transaction
return data, nil
case types.L1MessageTxType: // L1MessageTxType is not supported
default:
return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)
}
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal binary of the tx: tx=%v, err=%w", tx, err)
}
return rlpTxData, nil
}
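The converter returns the EIP-2718 typed-transaction envelope (or legacy RLP), which is what constructBlobPayload packs into the blob. A hedged usage sketch with purely illustrative values, showing the 0x02 type byte of a dynamic-fee transaction:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
)

func main() {
	// Illustrative values only; a real L2 transaction would carry a signature.
	to := common.HexToAddress("0x000000000000000000000000000000000000dead")
	tx := types.NewTx(&types.DynamicFeeTx{
		ChainID:   big.NewInt(1), // illustrative chain id
		Nonce:     0,
		To:        &to,
		Value:     big.NewInt(0),
		Gas:       21000,
		GasTipCap: big.NewInt(1),
		GasFeeCap: big.NewInt(2),
	})
	raw, err := tx.MarshalBinary() // the same call the converter above relies on
	if err != nil {
		panic(err)
	}
	// Typed transactions are prefixed with their EIP-2718 type byte (0x02 here).
	fmt.Printf("type byte: 0x%02x, encoded length: %d\n", raw[0], len(raw))
}
```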
// CrcMax calculates the maximum circuit row consumption (crc) over all sub-circuits, accumulated across the blocks in the chunk.
func (c *Chunk) CrcMax() (uint64, error) {
// Map sub-circuit name to row count
crc := make(map[string]uint64)
// Iterate over blocks, accumulate row consumption
for _, block := range c.Blocks {
if block.RowConsumption == nil {
return 0, fmt.Errorf("block (%d, %v) has nil RowConsumption", block.Header.Number, block.Header.Hash().Hex())
}
for _, subCircuit := range *block.RowConsumption {
crc[subCircuit.Name] += subCircuit.RowNumber
}
}
// Find the maximum row consumption
var maxVal uint64
for _, value := range crc {
if value > maxVal {
maxVal = value
}
}
// Return the maximum row consumption
return maxVal, nil
}
// NumTransactions calculates the total number of transactions in a Chunk.
func (c *Chunk) NumTransactions() uint64 {
var totalTxNum uint64
for _, block := range c.Blocks {
totalTxNum += uint64(len(block.Transactions))
}
return totalTxNum
}
// NumL2Transactions calculates the total number of L2 transactions in a Chunk.
func (c *Chunk) NumL2Transactions() uint64 {
var totalTxNum uint64
for _, block := range c.Blocks {
totalTxNum += block.NumL2Transactions()
}
return totalTxNum
}
// L2GasUsed calculates the total gas of L2 transactions in a Chunk.
func (c *Chunk) L2GasUsed() uint64 {
var totalGasUsed uint64
for _, block := range c.Blocks {
totalGasUsed += block.Header.GasUsed
}
return totalGasUsed
}
// StateRoot gets the state root after committing/finalizing the batch.
func (b *Batch) StateRoot() common.Hash {
numChunks := len(b.Chunks)
if len(b.Chunks) == 0 {
return common.Hash{}
}
lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].Header.Root
}
// WithdrawRoot gets the withdraw root after committing/finalizing the batch.
func (b *Batch) WithdrawRoot() common.Hash {
numChunks := len(b.Chunks)
if len(b.Chunks) == 0 {
return common.Hash{}
}
lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].WithdrawRoot
}

View File

@@ -1,121 +0,0 @@
package encoding
import (
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"github.com/stretchr/testify/assert"
)
func TestMain(m *testing.M) {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)
m.Run()
}
func TestUtilFunctions(t *testing.T) {
block1 := readBlockFromJSON(t, "../../testdata/blockTrace_02.json")
block2 := readBlockFromJSON(t, "../../testdata/blockTrace_03.json")
block3 := readBlockFromJSON(t, "../../testdata/blockTrace_04.json")
block4 := readBlockFromJSON(t, "../../testdata/blockTrace_05.json")
block5 := readBlockFromJSON(t, "../../testdata/blockTrace_06.json")
block6 := readBlockFromJSON(t, "../../testdata/blockTrace_07.json")
chunk1 := &Chunk{Blocks: []*Block{block1, block2}}
chunk2 := &Chunk{Blocks: []*Block{block3, block4}}
chunk3 := &Chunk{Blocks: []*Block{block5, block6}}
batch := &Batch{Chunks: []*Chunk{chunk1, chunk2, chunk3}}
// Test Block methods
assert.Equal(t, uint64(0), block1.NumL1Messages(0))
assert.Equal(t, uint64(2), block1.NumL2Transactions())
assert.Equal(t, uint64(0), block2.NumL1Messages(0))
assert.Equal(t, uint64(1), block2.NumL2Transactions())
assert.Equal(t, uint64(11), block3.NumL1Messages(0))
assert.Equal(t, uint64(1), block3.NumL2Transactions())
assert.Equal(t, uint64(42), block4.NumL1Messages(0))
assert.Equal(t, uint64(0), block4.NumL2Transactions())
assert.Equal(t, uint64(10), block5.NumL1Messages(0))
assert.Equal(t, uint64(0), block5.NumL2Transactions())
assert.Equal(t, uint64(257), block6.NumL1Messages(0))
assert.Equal(t, uint64(0), block6.NumL2Transactions())
// Test Chunk methods
assert.Equal(t, uint64(0), chunk1.NumL1Messages(0))
assert.Equal(t, uint64(3), chunk1.NumL2Transactions())
crc1Max, err := chunk1.CrcMax()
assert.NoError(t, err)
assert.Equal(t, uint64(11), crc1Max)
assert.Equal(t, uint64(3), chunk1.NumTransactions())
assert.Equal(t, uint64(1194994), chunk1.L2GasUsed())
assert.Equal(t, uint64(42), chunk2.NumL1Messages(0))
assert.Equal(t, uint64(1), chunk2.NumL2Transactions())
crc2Max, err := chunk2.CrcMax()
assert.NoError(t, err)
assert.Equal(t, uint64(0), crc2Max)
assert.Equal(t, uint64(7), chunk2.NumTransactions())
assert.Equal(t, uint64(144000), chunk2.L2GasUsed())
assert.Equal(t, uint64(257), chunk3.NumL1Messages(0))
assert.Equal(t, uint64(0), chunk3.NumL2Transactions())
chunk3.Blocks[0].RowConsumption = nil
crc3Max, err := chunk3.CrcMax()
assert.Error(t, err)
assert.EqualError(t, err, "block (17, 0x003fee335455c0c293dda17ea9365fe0caa94071ed7216baf61f7aeb808e8a28) has nil RowConsumption")
assert.Equal(t, uint64(0), crc3Max)
assert.Equal(t, uint64(5), chunk3.NumTransactions())
assert.Equal(t, uint64(240000), chunk3.L2GasUsed())
// Test Batch methods
assert.Equal(t, block6.Header.Root, batch.StateRoot())
assert.Equal(t, block6.WithdrawRoot, batch.WithdrawRoot())
}
func TestConvertTxDataToRLPEncoding(t *testing.T) {
blocks := []*Block{
readBlockFromJSON(t, "../../testdata/blockTrace_02.json"),
readBlockFromJSON(t, "../../testdata/blockTrace_03.json"),
readBlockFromJSON(t, "../../testdata/blockTrace_04.json"),
readBlockFromJSON(t, "../../testdata/blockTrace_05.json"),
readBlockFromJSON(t, "../../testdata/blockTrace_06.json"),
readBlockFromJSON(t, "../../testdata/blockTrace_07.json"),
}
for _, block := range blocks {
for _, txData := range block.Transactions {
if txData.Type == types.L1MessageTxType {
continue
}
rlpTxData, err := ConvertTxDataToRLPEncoding(txData)
assert.NoError(t, err)
var tx types.Transaction
err = tx.UnmarshalBinary(rlpTxData)
assert.NoError(t, err)
assert.Equal(t, txData.TxHash, tx.Hash().Hex())
}
}
}
func TestEmptyBatchRoots(t *testing.T) {
emptyBatch := &Batch{Chunks: []*Chunk{}}
assert.Equal(t, common.Hash{}, emptyBatch.StateRoot())
assert.Equal(t, common.Hash{}, emptyBatch.WithdrawRoot())
}
func readBlockFromJSON(t *testing.T, filename string) *Block {
data, err := os.ReadFile(filename)
assert.NoError(t, err)
block := &Block{}
assert.NoError(t, json.Unmarshal(data, block))
return block
}

View File

@@ -9,7 +9,7 @@ import (
// CheckScrollProverVersion check the "scroll-prover" version, if it's different from the local one, return false
func CheckScrollProverVersion(proverVersion string) bool {
// note the the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// note the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.92"
var tag = "v4.4.9"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -67,7 +67,7 @@ Commit a batch of transactions on layer 1.
function committedBatches(uint256) external view returns (bytes32)
```
Return the batch hash of a committed batch.
@@ -81,7 +81,7 @@ Return the batch hash of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The batch hash of a committed batch. |
### finalizeBatchWithProof
@@ -130,7 +130,7 @@ Finalize a committed batch (with blob) on layer 1.
function finalizedStateRoots(uint256) external view returns (bytes32)
```
Return the state root of a committed batch.
@@ -144,7 +144,7 @@ Return the state root of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The state root of a committed batch. |
### importGenesisBatch
@@ -160,8 +160,8 @@ Import layer 2 genesis block
| Name | Type | Description |
|---|---|---|
| _batchHeader | bytes | undefined |
| _stateRoot | bytes32 | undefined |
| _batchHeader | bytes | The header of the genesis batch. |
| _stateRoot | bytes32 | The state root of the genesis block. |
### initialize
@@ -187,7 +187,7 @@ Initialize the storage of ScrollChain.
function isBatchFinalized(uint256 _batchIndex) external view returns (bool)
```
Return whether the batch is finalized by batch index.
@@ -201,7 +201,7 @@ Return whether the batch is finalized by batch index.
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
| _0 | bool | Whether the batch is finalized by batch index. |
### isProver
@@ -253,7 +253,7 @@ Whether an account is a sequencer.
function lastFinalizedBatchIndex() external view returns (uint256)
```
The latest finalized batch index.
@@ -262,7 +262,7 @@ The latest finalized batch index.
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
| _0 | uint256 | The latest finalized batch index. |
### layer2ChainId
@@ -480,7 +480,7 @@ The address of RollupVerifier.
function withdrawRoots(uint256) external view returns (bytes32)
```
Return the message root of a committed batch.
@@ -494,7 +494,7 @@ Return the message root of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The message root of a committed batch. |
@@ -882,17 +882,6 @@ error ErrorIncorrectPreviousStateRoot()
*Thrown when the previous state root doesn&#39;t match stored one.*
### ErrorInvalidBatchHeaderVersion
```solidity
error ErrorInvalidBatchHeaderVersion()
```
*Thrown when the batch header version is invalid.*
### ErrorLastL1MessageSkipped
```solidity

View File

@@ -98,19 +98,6 @@ describe("ScrollChain.blob", async () => {
batchHeader0[25] = 1;
});
it("should revert when ErrorInvalidBatchHeaderVersion", async () => {
const header = new Uint8Array(121);
header[0] = 2;
await expect(chain.commitBatch(1, header, ["0x"], "0x")).to.revertedWithCustomError(
chain,
"ErrorInvalidBatchHeaderVersion"
);
await expect(chain.commitBatch(2, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
chain,
"ErrorInvalidBatchHeaderVersion"
);
});
it("should revert when ErrorNoBlobFound", async () => {
await expect(chain.commitBatch(1, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
chain,

View File

@@ -92,12 +92,10 @@ contract DeployL1BridgeContracts is Script {
}
function deployMultipleVersionRollupVerifier() internal {
uint256[] memory _versions = new uint256[](2);
address[] memory _verifiers = new address[](2);
uint256[] memory _versions = new uint256[](1);
address[] memory _verifiers = new address[](1);
_versions[0] = 0;
_verifiers[0] = address(zkEvmVerifierV1);
_versions[1] = 1;
_verifiers[1] = address(zkEvmVerifierV1);
rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));

View File

@@ -74,9 +74,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @dev Thrown when the previous state root doesn't match stored one.
error ErrorIncorrectPreviousStateRoot();
/// @dev Thrown when the batch header version is invalid.
error ErrorInvalidBatchHeaderVersion();
/// @dev Thrown when the last message is skipped.
error ErrorLastL1MessageSkipped();
@@ -119,7 +116,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
/// point evaluation precompile
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
uint256 private constant BLS_MODULUS =
52435875175126190479447740508185965837690552500527637822603658699938581184513;
/// @notice The chain id of the corresponding layer 2 chain.
uint64 public immutable layer2ChainId;
@@ -310,7 +308,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
batchPtr,
BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
);
} else if (_version == 1) {
} else if (_version >= 1) {
// versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec,
// but they use different blob encoding and different verifiers.
bytes32 blobVersionedHash;
(blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
_totalL1MessagesPoppedOverall,
@@ -322,7 +323,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
}
// store entries, the order matters
BatchHeaderV1Codec.storeVersion(batchPtr, 1);
BatchHeaderV1Codec.storeVersion(batchPtr, _version);
BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
@@ -335,8 +336,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
batchPtr,
BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
// check the length of bitmap
@@ -711,18 +710,15 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
version := shr(248, calldataload(_batchHeader.offset))
}
// the version should always be 0, 1 or 2 in the current code
uint256 _length;
if (version == 0) {
(batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
} else if (version == 1) {
} else if (version >= 1) {
(batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
// only check when genesis is imported
if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {

View File

@@ -11,7 +11,7 @@ import {IScrollERC20Upgradeable} from "../../libraries/token/IScrollERC20Upgrade
/// @title L2ERC20Gateway
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens on layer 2 and
/// finalize deposit the tokens from layer 1.
/// @dev The withdrawn tokens tokens will be burned directly. On finalizing deposit, the corresponding
/// @dev The withdrawn tokens will be burned directly. On finalizing deposit, the corresponding
/// tokens will be minted and transferred to the recipient.
contract L2CustomERC20Gateway is L2ERC20Gateway {
/**********

View File

@@ -15,10 +15,22 @@ interface IL1GasPriceOracle {
/// @param scalar The current fee scalar updated.
event ScalarUpdated(uint256 scalar);
/// @notice Emitted when current commit fee scalar is updated.
/// @param scalar The current commit fee scalar updated.
event CommitScalarUpdated(uint256 scalar);
/// @notice Emitted when current blob fee scalar is updated.
/// @param scalar The current blob fee scalar updated.
event BlobScalarUpdated(uint256 scalar);
/// @notice Emitted when current l1 base fee is updated.
/// @param l1BaseFee The current l1 base fee updated.
event L1BaseFeeUpdated(uint256 l1BaseFee);
/// @notice Emitted when current l1 blob base fee is updated.
/// @param l1BlobBaseFee The current l1 blob base fee updated.
event L1BlobBaseFeeUpdated(uint256 l1BlobBaseFee);
/*************************
* Public View Functions *
*************************/
@@ -26,15 +38,24 @@ interface IL1GasPriceOracle {
/// @notice Return the current l1 fee overhead.
function overhead() external view returns (uint256);
/// @notice Return the current l1 fee scalar.
/// @notice Return the current l1 fee scalar before Curie fork.
function scalar() external view returns (uint256);
/// @notice Return the current l1 commit fee scalar.
function commitScalar() external view returns (uint256);
/// @notice Return the current l1 blob fee scalar.
function blobScalar() external view returns (uint256);
/// @notice Return the latest known l1 base fee.
function l1BaseFee() external view returns (uint256);
/// @notice Return the latest known l1 blob base fee.
function l1BlobBaseFee() external view returns (uint256);
/// @notice Computes the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters.
/// @param data Unsigned fully RLP-encoded transaction to get the L1 fee for.
/// @param data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function getL1Fee(bytes memory data) external view returns (uint256);
@@ -42,7 +63,7 @@ interface IL1GasPriceOracle {
/// represents the per-transaction gas overhead of posting the transaction and state
/// roots to L1. Adds 74 bytes of padding to account for the fact that the input does
/// not have a signature.
/// @param data Unsigned fully RLP-encoded transaction to get the L1 gas for.
/// @param data Signed fully RLP-encoded transaction to get the L1 gas for.
/// @return Amount of L1 gas used to publish the transaction.
function getL1GasUsed(bytes memory data) external view returns (uint256);
@@ -53,4 +74,9 @@ interface IL1GasPriceOracle {
/// @notice Allows whitelisted caller to modify the l1 base fee.
/// @param _l1BaseFee New l1 base fee.
function setL1BaseFee(uint256 _l1BaseFee) external;
/// @notice Allows a whitelisted caller to modify the l1 base fee and the l1 blob base fee.
/// @param _l1BaseFee New l1 base fee.
/// @param _l1BlobBaseFee New l1 blob base fee.
function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external;
}

View File

@@ -17,6 +17,28 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// @param _newWhitelist The address of new whitelist contract.
event UpdateWhitelist(address _oldWhitelist, address _newWhitelist);
/**********
* Errors *
**********/
/// @dev Thrown when the blob fee scalar exceeds `MAX_BLOB_SCALAR`.
error ErrExceedMaxBlobScalar();
/// @dev Thrown when the commit fee scalar exceeds `MAX_COMMIT_SCALAR`.
error ErrExceedMaxCommitScalar();
/// @dev Thrown when the l1 fee overhead exceeds `MAX_OVERHEAD`.
error ErrExceedMaxOverhead();
/// @dev Thrown when the l1 fee scalar exceeds `MAX_SCALAR`.
error ErrExceedMaxScalar();
/// @dev Thrown when the caller is not whitelisted.
error ErrCallerNotWhitelisted();
/// @dev Thrown when enabling the Curie fork after it has already been enabled.
error ErrAlreadyInCurieFork();
/*************
* Constants *
*************/
@@ -28,9 +50,25 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// Computed based on current l1 block gas limit.
uint256 private constant MAX_OVERHEAD = 30000000 / 16;
/// @dev The maximum possible l1 fee scale.
/// @dev The maximum possible l1 fee scale before Curie.
/// x1000 should be enough.
uint256 private constant MAX_SCALE = 1000 * PRECISION;
uint256 private constant MAX_SCALAR = 1000 * PRECISION;
/// @dev The maximum possible l1 commit fee scalar after Curie.
/// We derive the commit scalar by
/// ```
/// commit_scalar = commit_gas_per_tx * fluctuation_multiplier * 1e9
/// ```
/// So, the value should not exceed 10^9 * 1e9 normally.
uint256 private constant MAX_COMMIT_SCALAR = 10 ** 9 * PRECISION;
/// @dev The maximum possible l1 blob fee scalar after Curie.
/// We derive the blob scalar by
/// ```
/// blob_scalar = fluctuation_multiplier / compression_ratio / blob_util_ratio * 1e9
/// ```
/// So, the value should not exceed 10^9 * 1e9 normally.
uint256 private constant MAX_BLOB_SCALAR = 10 ** 9 * PRECISION;
/*************
* Variables *
@@ -48,6 +86,27 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// @notice The address of whitelist contract.
IWhitelist public whitelist;
/// @inheritdoc IL1GasPriceOracle
uint256 public override l1BlobBaseFee;
/// @inheritdoc IL1GasPriceOracle
uint256 public override commitScalar;
/// @inheritdoc IL1GasPriceOracle
uint256 public override blobScalar;
/// @notice Indicates whether the network has gone through the Curie upgrade.
bool public isCurie;
/*************
* Modifiers *
*************/
modifier onlyWhitelistedSender() {
if (!whitelist.isSenderAllowed(msg.sender)) revert ErrCallerNotWhitelisted();
_;
}
/***************
* Constructor *
***************/
@@ -62,15 +121,116 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// @inheritdoc IL1GasPriceOracle
function getL1Fee(bytes memory _data) external view override returns (uint256) {
uint256 _l1GasUsed = getL1GasUsed(_data);
uint256 _l1Fee = _l1GasUsed * l1BaseFee;
return (_l1Fee * scalar) / PRECISION;
if (isCurie) {
return _getL1FeeCurie(_data);
} else {
return _getL1FeeBeforeCurie(_data);
}
}
/// @inheritdoc IL1GasPriceOracle
/// @dev The `_data` is the RLP-encoded transaction with signature. We also reserve an additional
/// 4 bytes, counted as non-zero bytes, to store the number of bytes in the RLP-encoded transaction.
function getL1GasUsed(bytes memory _data) public view override returns (uint256) {
if (isCurie) {
// Returns 0: after Curie all transactions are posted via the blob, so the calldata component is effectively zero.
return 0;
} else {
return _getL1GasUsedBeforeCurie(_data);
}
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL1GasPriceOracle
function setL1BaseFee(uint256 _l1BaseFee) external override onlyWhitelistedSender {
l1BaseFee = _l1BaseFee;
emit L1BaseFeeUpdated(_l1BaseFee);
}
/// @inheritdoc IL1GasPriceOracle
function setL1BaseFeeAndBlobBaseFee(
uint256 _l1BaseFee,
uint256 _l1BlobBaseFee
) external override onlyWhitelistedSender {
l1BaseFee = _l1BaseFee;
l1BlobBaseFee = _l1BlobBaseFee;
emit L1BaseFeeUpdated(_l1BaseFee);
emit L1BlobBaseFeeUpdated(_l1BlobBaseFee);
}
/************************
* Restricted Functions *
************************/
/// @notice Allows the owner to modify the overhead.
/// @param _overhead New overhead
function setOverhead(uint256 _overhead) external onlyOwner {
if (_overhead > MAX_OVERHEAD) revert ErrExceedMaxOverhead();
overhead = _overhead;
emit OverheadUpdated(_overhead);
}
/// Allows the owner to modify the scalar.
/// @param _scalar New scalar
function setScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_SCALAR) revert ErrExceedMaxScalar();
scalar = _scalar;
emit ScalarUpdated(_scalar);
}
/// Allows the owner to modify the commit scalar.
/// @param _scalar New scalar
function setCommitScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_COMMIT_SCALAR) revert ErrExceedMaxCommitScalar();
commitScalar = _scalar;
emit CommitScalarUpdated(_scalar);
}
/// Allows the owner to modify the blob scalar.
/// @param _scalar New scalar
function setBlobScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_BLOB_SCALAR) revert ErrExceedMaxBlobScalar();
blobScalar = _scalar;
emit BlobScalarUpdated(_scalar);
}
/// @notice Update whitelist contract.
/// @dev This function can only be called by the contract owner.
/// @param _newWhitelist The address of new whitelist contract.
function updateWhitelist(address _newWhitelist) external onlyOwner {
address _oldWhitelist = address(whitelist);
whitelist = IWhitelist(_newWhitelist);
emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
}
/// @notice Enable the Curie fork (callable by contract owner).
///
/// @dev Since this is a predeploy contract, the storage slot will be set directly during the hard fork
/// to avoid external owner operations.
/// This function is kept mainly to make unit testing easier.
function enableCurie() external onlyOwner {
if (isCurie) revert ErrAlreadyInCurieFork();
isCurie = true;
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to compute the amount of L1 gas used for a transaction before the Curie fork.
/// The `_data` is the RLP-encoded transaction with signature. We also reserve an additional
/// 4 bytes, counted as non-zero bytes, to store the number of bytes in the RLP-encoded transaction.
/// @param _data Signed fully RLP-encoded transaction to get the L1 gas for.
/// @return Amount of L1 gas used to publish the transaction.
function _getL1GasUsedBeforeCurie(bytes memory _data) private view returns (uint256) {
uint256 _total = 0;
uint256 _length = _data.length;
unchecked {
@@ -85,48 +245,22 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
}
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL1GasPriceOracle
function setL1BaseFee(uint256 _l1BaseFee) external override {
require(whitelist.isSenderAllowed(msg.sender), "Not whitelisted sender");
l1BaseFee = _l1BaseFee;
emit L1BaseFeeUpdated(_l1BaseFee);
/// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters, before Curie fork.
/// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function _getL1FeeBeforeCurie(bytes memory _data) private view returns (uint256) {
uint256 _l1GasUsed = _getL1GasUsedBeforeCurie(_data);
uint256 _l1Fee = _l1GasUsed * l1BaseFee;
return (_l1Fee * scalar) / PRECISION;
}
/************************
* Restricted Functions *
************************/
/// @notice Allows the owner to modify the overhead.
/// @param _overhead New overhead
function setOverhead(uint256 _overhead) external onlyOwner {
require(_overhead <= MAX_OVERHEAD, "exceed maximum overhead");
overhead = _overhead;
emit OverheadUpdated(_overhead);
}
/// Allows the owner to modify the scalar.
/// @param _scalar New scalar
function setScalar(uint256 _scalar) external onlyOwner {
require(_scalar <= MAX_SCALE, "exceed maximum scale");
scalar = _scalar;
emit ScalarUpdated(_scalar);
}
/// @notice Update whitelist contract.
/// @dev This function can only be called by the contract owner.
/// @param _newWhitelist The address of new whitelist contract.
function updateWhitelist(address _newWhitelist) external onlyOwner {
address _oldWhitelist = address(whitelist);
whitelist = IWhitelist(_newWhitelist);
emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
/// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters, after Curie fork.
/// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function _getL1FeeCurie(bytes memory _data) private view returns (uint256) {
// We have bounded the values of `commitScalar` and `blobScalar`, so the whole expression won't overflow.
return (commitScalar * l1BaseFee + blobScalar * _data.length * l1BlobBaseFee) / PRECISION;
}
}
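After Curie, the per-transaction L1 data fee collapses to the closed form in `_getL1FeeCurie`: `(commitScalar * l1BaseFee + blobScalar * txSize * l1BlobBaseFee) / PRECISION`. A worked sketch in Go with purely illustrative numbers; PRECISION = 1e9 is an assumption here, matching the 1e9 factor in the commit/blob scalar derivations quoted above:

```go
package main

import (
	"fmt"
	"math/big"
)

// curieL1Fee mirrors _getL1FeeCurie:
//   fee = (commitScalar*l1BaseFee + blobScalar*txSize*l1BlobBaseFee) / precision
func curieL1Fee(commitScalar, l1BaseFee, blobScalar, txSize, l1BlobBaseFee *big.Int) *big.Int {
	precision := big.NewInt(1_000_000_000) // assumed PRECISION = 1e9
	commitPart := new(big.Int).Mul(commitScalar, l1BaseFee)
	blobPart := new(big.Int).Mul(blobScalar, txSize)
	blobPart.Mul(blobPart, l1BlobBaseFee)
	return new(big.Int).Div(commitPart.Add(commitPart, blobPart), precision)
}

func main() {
	// Purely illustrative inputs: commitScalar 230e6, blobScalar 350e6,
	// 30 gwei L1 base fee, 1 wei blob base fee, 500-byte signed tx.
	fee := curieL1Fee(
		big.NewInt(230_000_000),    // commitScalar
		big.NewInt(30_000_000_000), // l1BaseFee (30 gwei)
		big.NewInt(350_000_000),    // blobScalar
		big.NewInt(500),            // RLP-encoded tx size in bytes
		big.NewInt(1),              // l1BlobBaseFee (1 wei)
	)
	fmt.Println(fee, "wei") // (230e6*30e9 + 350e6*500*1) / 1e9 = 6900000175
}
```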

Some files were not shown because too many files have changed in this diff.