mirror of
https://github.com/scroll-tech/scroll.git
synced 2026-01-12 07:28:08 -05:00
Compare commits
52 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5362e28f74 | ||
|
|
e8eb7ff8fd | ||
|
|
b01b5819da | ||
|
|
cb09024821 | ||
|
|
8bd4277c13 | ||
|
|
02415a692a | ||
|
|
1d29b0117f | ||
|
|
4c6d7b7deb | ||
|
|
395a0b7fe2 | ||
|
|
1606c271f0 | ||
|
|
73414391b9 | ||
|
|
e35d1d5b4e | ||
|
|
96179d1c60 | ||
|
|
63f1b1e7de | ||
|
|
d8bac32145 | ||
|
|
f173c9545d | ||
|
|
e04e45cab9 | ||
|
|
e5d68a97fc | ||
|
|
0691d1b181 | ||
|
|
f75eda1418 | ||
|
|
e41fee6766 | ||
|
|
5c3b358a22 | ||
|
|
58517f935f | ||
|
|
dc98cf9c08 | ||
|
|
66619a7548 | ||
|
|
e5e5cafc48 | ||
|
|
6030927680 | ||
|
|
367179394e | ||
|
|
999db62121 | ||
|
|
ce29d82e5a | ||
|
|
6817ddd3db | ||
|
|
ea51ec6631 | ||
|
|
adbcd7fb0f | ||
|
|
42e13d3635 | ||
|
|
1901a3295c | ||
|
|
a04026d455 | ||
|
|
19b89d521b | ||
|
|
0ad7947c05 | ||
|
|
bab1841193 | ||
|
|
ed99ac8569 | ||
|
|
d3ec65fd8b | ||
|
|
7e958d6e9a | ||
|
|
670e848b2c | ||
|
|
c6e8fcb2d3 | ||
|
|
c68f4283b1 | ||
|
|
97745fc4d0 | ||
|
|
44c5f1c8b4 | ||
|
|
9bb48689ec | ||
|
|
352aea4e70 | ||
|
|
69224ebb93 | ||
|
|
317ba26206 | ||
|
|
7a1af5913e |
6
.github/workflows/bridge_history_api.yml
vendored
6
.github/workflows/bridge_history_api.yml
vendored
@@ -32,7 +32,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Lint
|
||||
@@ -46,7 +46,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Test
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install goimports
|
||||
|
||||
10
.github/workflows/common.yml
vendored
10
.github/workflows/common.yml
vendored
@@ -35,7 +35,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Cache cargo
|
||||
@@ -54,7 +54,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install goimports
|
||||
@@ -79,15 +79,15 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
|
||||
name: 'solc'
|
||||
version: '0.8.16'
|
||||
version: '0.8.24'
|
||||
- name: Install Geth Tools
|
||||
uses: gacts/install-geth-tools@v1
|
||||
- name: Build prerequisites
|
||||
|
||||
14
.github/workflows/contracts.yml
vendored
14
.github/workflows/contracts.yml
vendored
@@ -43,10 +43,10 @@ jobs:
|
||||
- name: Setup LCOV
|
||||
uses: hrishikesh-kadam/setup-lcov@v1
|
||||
|
||||
- name: Install Node.js 14
|
||||
- name: Install Node.js 18
|
||||
uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '14'
|
||||
node-version: '18'
|
||||
|
||||
- name: Get yarn cache directory path
|
||||
id: yarn-cache-dir-path
|
||||
@@ -73,13 +73,13 @@ jobs:
|
||||
run: yarn install
|
||||
|
||||
- name: Compile with foundry
|
||||
run: forge build
|
||||
run: forge build --evm-version cancun
|
||||
|
||||
- name: Run foundry tests
|
||||
run: forge test -vvv
|
||||
run: forge test --evm-version cancun -vvv
|
||||
|
||||
- name: Run foundry coverage
|
||||
run : forge coverage --report lcov
|
||||
run : forge coverage --evm-version cancun --report lcov
|
||||
|
||||
- name : Prune coverage
|
||||
run : lcov --rc branch_coverage=1 --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*'
|
||||
@@ -102,10 +102,10 @@ jobs:
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Install Node.js 14
|
||||
- name: Install Node.js 18
|
||||
uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '14'
|
||||
node-version: '18'
|
||||
|
||||
- name: Get yarn cache directory path
|
||||
id: yarn-cache-dir-path
|
||||
|
||||
4
.github/workflows/coordinator.yml
vendored
4
.github/workflows/coordinator.yml
vendored
@@ -101,9 +101,9 @@ jobs:
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
|
||||
name: 'solc'
|
||||
version: '0.8.16'
|
||||
version: '0.8.24'
|
||||
- name: Install Geth Tools
|
||||
uses: gacts/install-geth-tools@v1
|
||||
- name: Build prerequisites
|
||||
|
||||
10
.github/workflows/database.yml
vendored
10
.github/workflows/database.yml
vendored
@@ -32,7 +32,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Lint
|
||||
@@ -47,7 +47,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install goimports
|
||||
@@ -72,15 +72,15 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
|
||||
name: 'solc'
|
||||
version: '0.8.16'
|
||||
version: '0.8.24'
|
||||
- name: Install Geth Tools
|
||||
uses: gacts/install-geth-tools@v1
|
||||
- name: Build prerequisites
|
||||
|
||||
289
.github/workflows/docker.yml
vendored
289
.github/workflows/docker.yml
vendored
@@ -5,12 +5,17 @@ on:
|
||||
tags:
|
||||
- v**
|
||||
|
||||
env:
|
||||
AWS_REGION: us-west-2
|
||||
|
||||
jobs:
|
||||
event_watcher:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
@@ -18,22 +23,43 @@ jobs:
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push event_watcher docker
|
||||
uses: docker/build-push-action@v2
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
- name: check repo and create it if not exist
|
||||
env:
|
||||
REPOSITORY: event-watcher
|
||||
run: |
|
||||
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
REPOSITORY: event-watcher
|
||||
IMAGE_TAG: ${{ github.ref_name }}
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/event_watcher.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/event-watcher:${{github.ref_name}}
|
||||
scrolltech/event-watcher:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
|
||||
|
||||
gas_oracle:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
@@ -41,22 +67,43 @@ jobs:
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push gas_oracle docker
|
||||
uses: docker/build-push-action@v2
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
- name: check repo and create it if not exist
|
||||
env:
|
||||
REPOSITORY: gas-oracle
|
||||
run: |
|
||||
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
REPOSITORY: gas-oracle
|
||||
IMAGE_TAG: ${{ github.ref_name }}
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/gas_oracle.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/gas-oracle:${{github.ref_name}}
|
||||
scrolltech/gas-oracle:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
|
||||
|
||||
rollup_relayer:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
@@ -64,68 +111,131 @@ jobs:
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push rollup_relayer docker
|
||||
uses: docker/build-push-action@v2
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
- name: check repo and create it if not exist
|
||||
env:
|
||||
REPOSITORY: rollup-relayer
|
||||
run: |
|
||||
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
REPOSITORY: rollup-relayer
|
||||
IMAGE_TAG: ${{ github.ref_name }}
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/rollup_relayer.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/rollup-relayer:${{github.ref_name}}
|
||||
scrolltech/rollup-relayer:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
|
||||
|
||||
bridgehistoryapi-fetcher:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push bridgehistoryapi-fetcher docker
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/bridgehistoryapi-fetcher:${{github.ref_name}}
|
||||
scrolltech/bridgehistoryapi-fetcher:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
- name: check repo and create it if not exist
|
||||
env:
|
||||
REPOSITORY: bridgehistoryapi-fetcher
|
||||
run: |
|
||||
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
REPOSITORY: bridgehistoryapi-fetcher
|
||||
IMAGE_TAG: ${{ github.ref_name }}
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
|
||||
|
||||
bridgehistoryapi-api:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push bridgehistoryapi-api docker
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/bridgehistoryapi-api:${{github.ref_name}}
|
||||
scrolltech/bridgehistoryapi-api:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
- name: check repo and create it if not exist
|
||||
env:
|
||||
REPOSITORY: bridgehistoryapi-api
|
||||
run: |
|
||||
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
REPOSITORY: bridgehistoryapi-api
|
||||
IMAGE_TAG: ${{ github.ref_name }}
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
|
||||
|
||||
coordinator-api:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
@@ -133,22 +243,43 @@ jobs:
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push coordinator docker
|
||||
uses: docker/build-push-action@v2
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
- name: check repo and create it if not exist
|
||||
env:
|
||||
REPOSITORY: coordinator-api
|
||||
run: |
|
||||
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
REPOSITORY: coordinator-api
|
||||
IMAGE_TAG: ${{ github.ref_name }}
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/coordinator-api.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/coordinator-api:${{github.ref_name}}
|
||||
scrolltech/coordinator-api:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
|
||||
|
||||
coordinator-cron:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
@@ -156,14 +287,32 @@ jobs:
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push coordinator docker
|
||||
uses: docker/build-push-action@v2
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
- name: check repo and create it if not exist
|
||||
env:
|
||||
REPOSITORY: coordinator-cron
|
||||
run: |
|
||||
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
REPOSITORY: coordinator-cron
|
||||
IMAGE_TAG: ${{ github.ref_name }}
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/coordinator-cron.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/coordinator-cron:${{github.ref_name}}
|
||||
scrolltech/coordinator-cron:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
|
||||
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
|
||||
|
||||
6
.github/workflows/integration.yml
vendored
6
.github/workflows/integration.yml
vendored
@@ -22,15 +22,15 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
|
||||
name: 'solc'
|
||||
version: '0.8.16'
|
||||
version: '0.8.24'
|
||||
- name: Install Geth Tools
|
||||
uses: gacts/install-geth-tools@v1
|
||||
- name: Build prerequisites
|
||||
|
||||
8
.github/workflows/prover.yml
vendored
8
.github/workflows/prover.yml
vendored
@@ -32,7 +32,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Test
|
||||
@@ -56,7 +56,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Cache cargo
|
||||
@@ -73,7 +73,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Lint
|
||||
@@ -87,7 +87,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install goimports
|
||||
|
||||
14
.github/workflows/rollup.yml
vendored
14
.github/workflows/rollup.yml
vendored
@@ -34,15 +34,15 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
|
||||
name: 'solc'
|
||||
version: '0.8.16'
|
||||
version: '0.8.24'
|
||||
- name: Install Geth Tools
|
||||
uses: gacts/install-geth-tools@v1
|
||||
- name: Lint
|
||||
@@ -58,7 +58,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install goimports
|
||||
@@ -83,15 +83,15 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.20.x
|
||||
go-version: 1.21.x
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Install Solc
|
||||
uses: supplypike/setup-bin@v3
|
||||
with:
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
|
||||
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux'
|
||||
name: 'solc'
|
||||
version: '0.8.16'
|
||||
version: '0.8.24'
|
||||
- name: Install Geth Tools
|
||||
uses: gacts/install-geth-tools@v1
|
||||
- name: Build prerequisites
|
||||
|
||||
2
LICENSE
2
LICENSE
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2022-2023 Scroll
|
||||
Copyright (c) 2022-2024 Scroll
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
3
Makefile
3
Makefile
@@ -43,6 +43,7 @@ fmt: ## format the code
|
||||
goimports -local $(PWD)/tests/integration-test/ -w .
|
||||
|
||||
dev_docker: ## build docker images for development/testing usages
|
||||
docker pull postgres
|
||||
docker build -t scroll_l1geth ./common/docker/l1geth/
|
||||
docker build -t scroll_l2geth ./common/docker/l2geth/
|
||||
|
||||
@@ -50,7 +51,7 @@ build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
|
||||
docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)
|
||||
|
||||
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
|
||||
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app scroll_test_image
|
||||
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app -e HOST_PATH=$(PWD) scroll_test_image
|
||||
|
||||
clean: ## Empty out the bin folder
|
||||
@rm -rf build/bin
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
We welcome community contributions to this repository. Before you submit any issues or PRs, please read the [Code of Conduct](CODE_OF_CONDUCT.md) and the [Contribution Guideline](CONTRIBUTING.md).
|
||||
|
||||
## Prerequisites
|
||||
+ Go 1.20
|
||||
+ Go 1.21
|
||||
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
|
||||
+ Hardhat / Foundry
|
||||
+ Docker
|
||||
|
||||
@@ -3,8 +3,8 @@
|
||||
"confirmation": 0,
|
||||
"endpoint": "https://rpc.ankr.com/eth",
|
||||
"startHeight": 18306000,
|
||||
"blockTime": 10,
|
||||
"fetchLimit": 30,
|
||||
"blockTime": 12,
|
||||
"fetchLimit": 16,
|
||||
"MessengerAddr": "0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367",
|
||||
"ETHGatewayAddr": "0x7F2b8C31F88B6006c382775eea88297Ec1e3E905",
|
||||
"WETHGatewayAddr": "0x7AC440cAe8EB6328de4fA621163a792c1EA9D4fE",
|
||||
@@ -23,7 +23,7 @@
|
||||
"confirmation": 0,
|
||||
"endpoint": "https://rpc.scroll.io",
|
||||
"blockTime": 3,
|
||||
"fetchLimit": 100,
|
||||
"fetchLimit": 64,
|
||||
"MessengerAddr": "0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC",
|
||||
"ETHGatewayAddr": "0x6EA73e05AdC79974B931123675ea8F78FfdacDF0",
|
||||
"WETHGatewayAddr": "0x7003E7B7186f0E6601203b99F7B8DECBfA391cf9",
|
||||
|
||||
@@ -1,37 +1,49 @@
|
||||
module scroll-tech/bridge-history-api
|
||||
|
||||
go 1.20
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/gin-contrib/cors v1.5.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/go-redis/redis/v8 v8.11.5
|
||||
github.com/google/uuid v1.4.0
|
||||
github.com/pressly/goose/v3 v3.16.0
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/prometheus/client_golang v1.16.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
golang.org/x/sync v0.5.0
|
||||
golang.org/x/sync v0.6.0
|
||||
gorm.io/gorm v1.25.5
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.0 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.12.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/bytedance/sonic v1.10.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
|
||||
github.com/chenzhuoyu/iasm v0.9.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/docker/cli v25.0.1+incompatible // indirect
|
||||
github.com/docker/docker v25.0.3+incompatible // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
|
||||
github.com/fjl/memsize v0.0.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-kit/kit v0.9.0 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.15.5 // indirect
|
||||
@@ -40,7 +52,11 @@ require (
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/hashicorp/go-bexpr v0.1.10 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
|
||||
@@ -48,29 +64,40 @@ require (
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/onsi/gomega v1.27.1 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.39.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.7.1 // indirect
|
||||
github.com/sethvargo/go-retry v0.2.4 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
@@ -78,15 +105,19 @@ require (
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.24.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/arch v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.16.0 // indirect
|
||||
golang.org/x/crypto v0.17.0 // indirect
|
||||
golang.org/x/mod v0.16.0 // indirect
|
||||
golang.org/x/net v0.18.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/sys v0.17.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
)
|
||||
|
||||
@@ -1,15 +1,30 @@
|
||||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0=
|
||||
github.com/ClickHouse/ch-go v0.58.2/go.mod h1:Ap/0bEmiLa14gYjCiRkYGbXvbe8vwdrfTYWhsuQ99aw=
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4=
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.15.0/go.mod h1:kXt1SRq0PIRa6aKZD7TnFnY9PQKmc2b13sHtOYcK6cQ=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
|
||||
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
|
||||
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
@@ -24,7 +39,10 @@ github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ
|
||||
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
|
||||
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
@@ -33,9 +51,16 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
|
||||
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
|
||||
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
|
||||
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -45,21 +70,35 @@ github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsP
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
|
||||
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU=
|
||||
github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
|
||||
github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T/Lao=
|
||||
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
|
||||
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk=
|
||||
github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
@@ -67,10 +106,20 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm
|
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
||||
github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
|
||||
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
|
||||
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
@@ -80,32 +129,50 @@ github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QX
|
||||
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
|
||||
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
|
||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@@ -113,11 +180,14 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
|
||||
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
|
||||
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
|
||||
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
|
||||
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
@@ -126,22 +196,28 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=
|
||||
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
@@ -149,86 +225,126 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
|
||||
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
|
||||
github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
|
||||
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
|
||||
github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
|
||||
github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s=
|
||||
github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg=
|
||||
github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c h1:MnAdt80steCDli4SAD0J0spBGNY+gQvbdptNjWztHcw=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c/go.mod h1:4HrFcoStbViFVy/9l/rvKl1XmizVAaPdgqI8v0U8hOc=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
|
||||
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
|
||||
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
|
||||
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
|
||||
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
@@ -242,17 +358,25 @@ github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZ
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw=
|
||||
github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dzWP1Lu+A40W883dK/Mr3xyDSM/2MggS8GtHT0qgAnE=
|
||||
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
|
||||
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
|
||||
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU=
|
||||
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
|
||||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
|
||||
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
|
||||
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
|
||||
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
|
||||
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
|
||||
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
@@ -261,69 +385,112 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
|
||||
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
|
||||
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
|
||||
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
|
||||
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
|
||||
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
|
||||
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
|
||||
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
|
||||
lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
|
||||
modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
|
||||
modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
|
||||
modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
|
||||
modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
|
||||
modernc.org/libc v1.32.0 h1:yXatHTrACp3WaKNRCoZwUK7qj5V8ep1XyY0ka4oYcNc=
|
||||
modernc.org/libc v1.32.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
|
||||
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
|
||||
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
|
||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||
modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8=
|
||||
modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
|
||||
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
|
||||
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||
|
||||
@@ -8,11 +8,11 @@ import (
|
||||
"scroll-tech/common/database"
|
||||
)
|
||||
|
||||
// LayerConfig is the configuration of Layer1/Layer2
|
||||
type LayerConfig struct {
|
||||
// FetcherConfig is the configuration of Layer1 or Layer2 fetcher.
|
||||
type FetcherConfig struct {
|
||||
Confirmation uint64 `json:"confirmation"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
StartHeight uint64 `json:"startHeight"` // Can only be configured to contract deployment height, otherwise in the current implementation, the message proof could not be successfully updated.
|
||||
StartHeight uint64 `json:"startHeight"` // Can only be configured to contract deployment height, message proof should be updated from the very beginning.
|
||||
BlockTime int64 `json:"blockTime"`
|
||||
FetchLimit uint64 `json:"fetchLimit"`
|
||||
MessengerAddr string `json:"MessengerAddr"`
|
||||
@@ -43,8 +43,8 @@ type RedisConfig struct {
|
||||
|
||||
// Config is the configuration of the bridge history backend
|
||||
type Config struct {
|
||||
L1 *LayerConfig `json:"L1"`
|
||||
L2 *LayerConfig `json:"L2"`
|
||||
L1 *FetcherConfig `json:"L1"`
|
||||
L2 *FetcherConfig `json:"L2"`
|
||||
DB *database.Config `json:"db"`
|
||||
Redis *RedisConfig `json:"redis"`
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
// L1MessageFetcher fetches cross message events from L1 and saves them to database.
|
||||
type L1MessageFetcher struct {
|
||||
ctx context.Context
|
||||
cfg *config.LayerConfig
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
|
||||
l1SyncHeight uint64
|
||||
@@ -35,7 +35,7 @@ type L1MessageFetcher struct {
|
||||
}
|
||||
|
||||
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
|
||||
func NewL1MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
|
||||
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
|
||||
c := &L1MessageFetcher{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
@@ -108,7 +108,6 @@ func (c *L1MessageFetcher) Start() {
|
||||
}
|
||||
|
||||
func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
c.l1MessageFetcherRunningTotal.Inc()
|
||||
startHeight := c.l1SyncHeight + 1
|
||||
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
|
||||
if rpcErr != nil {
|
||||
@@ -134,6 +133,7 @@ func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
c.l1MessageFetcherReorgTotal.Inc()
|
||||
log.Warn("L1 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
|
||||
c.updateL1SyncHeight(resyncHeight, lastBlockHash)
|
||||
c.l1MessageFetcherRunningTotal.Inc()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -143,6 +143,7 @@ func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
}
|
||||
|
||||
c.updateL1SyncHeight(to, lastBlockHash)
|
||||
c.l1MessageFetcherRunningTotal.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
// L2MessageFetcher fetches cross message events from L2 and saves them to database.
|
||||
type L2MessageFetcher struct {
|
||||
ctx context.Context
|
||||
cfg *config.LayerConfig
|
||||
cfg *config.FetcherConfig
|
||||
db *gorm.DB
|
||||
client *ethclient.Client
|
||||
l2SyncHeight uint64
|
||||
@@ -35,7 +35,7 @@ type L2MessageFetcher struct {
|
||||
}
|
||||
|
||||
// NewL2MessageFetcher creates a new L2MessageFetcher instance.
|
||||
func NewL2MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
|
||||
func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
|
||||
c := &L2MessageFetcher{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
@@ -110,7 +110,6 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
return
|
||||
}
|
||||
log.Info("fetch and save missing L2 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
|
||||
c.l2MessageFetcherRunningTotal.Inc()
|
||||
|
||||
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
|
||||
to := from + c.cfg.FetchLimit - 1
|
||||
@@ -128,6 +127,7 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
c.l2MessageFetcherReorgTotal.Inc()
|
||||
log.Warn("L2 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
|
||||
c.updateL2SyncHeight(resyncHeight, lastBlockHash)
|
||||
c.l2MessageFetcherRunningTotal.Inc()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -142,6 +142,7 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
}
|
||||
|
||||
c.updateL2SyncHeight(to, lastBlockHash)
|
||||
c.l2MessageFetcherRunningTotal.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -76,39 +76,30 @@ func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (ui
|
||||
|
||||
// L1InsertOrUpdate inserts or updates l1 messages
|
||||
func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult *L1FilterResult) error {
|
||||
err := b.db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 deposit messages", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages, tx); txErr != nil {
|
||||
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents, tx); txErr != nil {
|
||||
log.Error("failed to insert or update batch events", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 message queue events", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l1FetcherResult.RevertedTxs, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 failed gateway router transactions", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Error("failed to update db of L1 events", "err", err)
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages); err != nil {
|
||||
log.Error("failed to insert L1 deposit messages", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages); err != nil {
|
||||
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents); err != nil {
|
||||
log.Error("failed to insert or update batch events", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents); err != nil {
|
||||
log.Error("failed to insert L1 message queue events", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.crossMessageOrm.InsertFailedL1GatewayTxs(ctx, l1FetcherResult.RevertedTxs); err != nil {
|
||||
log.Error("failed to insert failed L1 gateway transactions", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -186,24 +177,18 @@ func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, heig
|
||||
|
||||
// L2InsertOrUpdate inserts or updates L2 messages
|
||||
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
|
||||
err := b.db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages, tx); txErr != nil {
|
||||
log.Error("failed to insert L2 withdrawal messages", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages, tx); txErr != nil {
|
||||
log.Error("failed to update L2 relayed messages of L1 deposits", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l2FetcherResult.OtherRevertedTxs, tx); txErr != nil {
|
||||
log.Error("failed to insert L2 failed gateway router transactions", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages); err != nil {
|
||||
log.Error("failed to insert L2 withdrawal messages", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Error("failed to update db of L2 events", "err", err)
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages); err != nil {
|
||||
log.Error("failed to update L2 relayed messages of L1 deposits", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.crossMessageOrm.InsertFailedL2GatewayTxs(ctx, l2FetcherResult.OtherRevertedTxs); err != nil {
|
||||
log.Error("failed to insert failed L2 gateway transactions", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -2,6 +2,7 @@ package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
@@ -10,21 +11,27 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L1EventParser the l1 event parser
|
||||
type L1EventParser struct {
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
}
|
||||
|
||||
// NewL1EventParser creates l1 event parser
|
||||
func NewL1EventParser() *L1EventParser {
|
||||
return &L1EventParser{}
|
||||
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
|
||||
return &L1EventParser{
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
|
||||
func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l1DepositMessages []*orm.CrossMessage
|
||||
var l1RelayedMessages []*orm.CrossMessage
|
||||
for _, vlog := range logs {
|
||||
@@ -32,7 +39,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1DepositETHSig:
|
||||
event := backendabi.ETHMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ETHGatewayABI, &event, "DepositETH", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositETH event", "err", err)
|
||||
log.Error("Failed to unpack DepositETH event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -44,7 +51,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL1ERC20GatewayABI, &event, "DepositERC20", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack DepositERC20 event", "err", err)
|
||||
log.Error("Failed to unpack DepositERC20 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -57,7 +64,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1DepositERC721Sig:
|
||||
event := backendabi.ERC721MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "DepositERC721", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositERC721 event", "err", err)
|
||||
log.Error("Failed to unpack DepositERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -70,7 +77,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1BatchDepositERC721Sig:
|
||||
event := backendabi.BatchERC721MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "BatchDepositERC721", vlog); err != nil {
|
||||
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
|
||||
log.Error("Failed to unpack BatchDepositERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -83,7 +90,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1DepositERC1155Sig:
|
||||
event := backendabi.ERC1155MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "DepositERC1155", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
|
||||
log.Error("Failed to unpack DepositERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -97,7 +104,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1BatchDepositERC1155Sig:
|
||||
event := backendabi.BatchERC1155MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog); err != nil {
|
||||
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
|
||||
log.Error("Failed to unpack BatchDepositERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -111,12 +118,17 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1SentMessageEventSig:
|
||||
event := backendabi.L1SentMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack SentMessage event", "err", err)
|
||||
log.Error("Failed to unpack SentMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
from, err := getRealFromAddress(ctx, event.Sender, event.Message, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
|
||||
if err != nil {
|
||||
log.Error("Failed to get real 'from' address", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1DepositMessages = append(l1DepositMessages, &orm.CrossMessage{
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
Sender: event.Sender.String(),
|
||||
Sender: from,
|
||||
Receiver: event.Target.String(),
|
||||
TokenType: int(orm.TokenTypeETH),
|
||||
L1TxHash: vlog.TxHash.String(),
|
||||
@@ -130,7 +142,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1RelayedMessageEventSig:
|
||||
event := backendabi.L1RelayedMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "RelayedMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack RelayedMessage event", "err", err)
|
||||
log.Error("Failed to unpack RelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
|
||||
@@ -143,7 +155,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1FailedRelayedMessageEventSig:
|
||||
event := backendabi.L1FailedRelayedMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "FailedRelayedMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
|
||||
@@ -166,17 +178,17 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
case backendabi.L1CommitBatchEventSig:
|
||||
event := backendabi.L1CommitBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "CommitBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack CommitBatch event", "err", err)
|
||||
log.Error("Failed to unpack CommitBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
|
||||
if err != nil || isPending {
|
||||
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
|
||||
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
|
||||
return nil, err
|
||||
}
|
||||
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
|
||||
if err != nil {
|
||||
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
|
||||
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
@@ -190,7 +202,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
case backendabi.L1RevertBatchEventSig:
|
||||
event := backendabi.L1RevertBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack RevertBatch event", "err", err)
|
||||
log.Error("Failed to unpack RevertBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
@@ -202,7 +214,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
case backendabi.L1FinalizeBatchEventSig:
|
||||
event := backendabi.L1FinalizeBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack FinalizeBatch event", "err", err)
|
||||
log.Error("Failed to unpack FinalizeBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
@@ -229,7 +241,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
case backendabi.L1QueueTransactionEventSig:
|
||||
event := backendabi.L1QueueTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "QueueTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack QueueTransaction event", "err", err)
|
||||
log.Error("Failed to unpack QueueTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
messageHash := common.BytesToHash(crypto.Keccak256(event.Data))
|
||||
@@ -245,7 +257,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
case backendabi.L1DequeueTransactionEventSig:
|
||||
event := backendabi.L1DequeueTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DequeueTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DequeueTransaction event", "err", err)
|
||||
log.Error("Failed to unpack DequeueTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
|
||||
@@ -258,7 +270,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
case backendabi.L1DropTransactionEventSig:
|
||||
event := backendabi.L1DropTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DropTransaction event", "err", err)
|
||||
log.Error("Failed to unpack DropTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
|
||||
@@ -270,3 +282,39 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
}
|
||||
return l1MessageQueueEvents, nil
|
||||
}
|
||||
|
||||
func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMessage []byte, client *ethclient.Client, txHash common.Hash, gatewayRouterAddr string) (string, error) {
|
||||
if eventSender != common.HexToAddress(gatewayRouterAddr) {
|
||||
return eventSender.String(), nil
|
||||
}
|
||||
|
||||
// deposit/withdraw ETH: EOA -> contract 1 -> ... -> contract n -> gateway router -> messenger.
|
||||
if len(eventMessage) >= 32 {
|
||||
addressBytes := eventMessage[32-common.AddressLength : 32]
|
||||
var address common.Address
|
||||
address.SetBytes(addressBytes)
|
||||
|
||||
return address.Hex(), nil
|
||||
}
|
||||
|
||||
log.Warn("event message data too short to contain an address", "length", len(eventMessage))
|
||||
|
||||
// Legacy handling logic if length of message < 32, for backward compatibility before the next contract upgrade.
|
||||
tx, isPending, rpcErr := client.TransactionByHash(ctx, txHash)
|
||||
if rpcErr != nil || isPending {
|
||||
log.Error("Failed to get transaction or the transaction is still pending", "rpcErr", rpcErr, "isPending", isPending)
|
||||
return "", rpcErr
|
||||
}
|
||||
// Case 1: deposit/withdraw ETH: EOA -> multisig -> gateway router -> messenger.
|
||||
if tx.To() != nil && (*tx.To()).String() != gatewayRouterAddr {
|
||||
return (*tx.To()).String(), nil
|
||||
}
|
||||
// Case 2: deposit/withdraw ETH: EOA -> gateway router -> messenger.
|
||||
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
|
||||
sender, err := signer.Sender(tx)
|
||||
if err != nil {
|
||||
log.Error("Get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", err)
|
||||
return "", err
|
||||
}
|
||||
return sender.String(), nil
|
||||
}
|
||||
|
||||
@@ -34,9 +34,10 @@ type L1FilterResult struct {
|
||||
|
||||
// L1FetcherLogic the L1 fetcher logic
|
||||
type L1FetcherLogic struct {
|
||||
cfg *config.LayerConfig
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
addressList []common.Address
|
||||
gatewayList []common.Address
|
||||
parser *L1EventParser
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
@@ -46,7 +47,7 @@ type L1FetcherLogic struct {
|
||||
}
|
||||
|
||||
// NewL1FetcherLogic creates L1 fetcher logic
|
||||
func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
|
||||
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
|
||||
addressList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
@@ -65,16 +66,34 @@ func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
common.HexToAddress(cfg.MessageQueueAddr),
|
||||
}
|
||||
|
||||
gatewayList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.StandardERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.CustomERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.WETHGatewayAddr),
|
||||
common.HexToAddress(cfg.DAIGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.ERC721GatewayAddr),
|
||||
common.HexToAddress(cfg.ERC1155GatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.MessengerAddr),
|
||||
|
||||
common.HexToAddress(cfg.GatewayRouterAddr),
|
||||
}
|
||||
|
||||
// Optional erc20 gateways.
|
||||
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
|
||||
}
|
||||
|
||||
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList)
|
||||
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
|
||||
|
||||
f := &L1FetcherLogic{
|
||||
db: db,
|
||||
@@ -83,7 +102,8 @@ func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
addressList: addressList,
|
||||
parser: NewL1EventParser(),
|
||||
gatewayList: gatewayList,
|
||||
parser: NewL1EventParser(cfg, client),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
@@ -96,7 +116,7 @@ func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
}
|
||||
|
||||
func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
|
||||
blocks, err := utils.GetL1BlocksInRange(ctx, f.client, from, to)
|
||||
blocks, err := utils.GetBlocksInRange(ctx, f.client, from, to)
|
||||
if err != nil {
|
||||
log.Error("failed to get L1 blocks in range", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
@@ -131,15 +151,9 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
blockTimestampsMap[block.NumberU64()] = block.Time()
|
||||
|
||||
for _, tx := range block.Transactions() {
|
||||
txTo := tx.To()
|
||||
if txTo == nil {
|
||||
continue
|
||||
}
|
||||
toAddress := txTo.String()
|
||||
|
||||
// GatewayRouter: L1 deposit.
|
||||
// Gateways: L1 deposit.
|
||||
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
|
||||
if toAddress != f.cfg.GatewayRouterAddr && toAddress != f.cfg.MessengerAddr {
|
||||
if !isTransactionToGateway(tx, f.gatewayList) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -233,7 +247,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(eventLogs, blockTimestampsMap)
|
||||
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
|
||||
@@ -1,26 +1,35 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L2EventParser the L2 event parser
|
||||
type L2EventParser struct {
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
}
|
||||
|
||||
// NewL2EventParser creates the L2 event parser
|
||||
func NewL2EventParser() *L2EventParser {
|
||||
return &L2EventParser{}
|
||||
func NewL2EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L2EventParser {
|
||||
return &L2EventParser{
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseL2EventLogs parses L2 watched events
|
||||
func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l2WithdrawMessages []*orm.CrossMessage
|
||||
var l2RelayedMessages []*orm.CrossMessage
|
||||
for _, vlog := range logs {
|
||||
@@ -29,7 +38,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.ETHMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ETHGatewayABI, &event, "WithdrawETH", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawETH event", "err", err)
|
||||
log.Error("Failed to unpack WithdrawETH event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -41,7 +50,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC20GatewayABI, &event, "WithdrawERC20", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
|
||||
log.Error("Failed to unpack WithdrawERC20 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -55,7 +64,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.ERC721MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
|
||||
log.Error("Failed to unpack WithdrawERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -69,7 +78,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.BatchERC721MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
|
||||
log.Error("Failed to unpack BatchWithdrawERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -83,7 +92,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.ERC1155MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
|
||||
log.Error("Failed to unpack WithdrawERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -98,7 +107,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.BatchERC1155MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
|
||||
log.Error("Failed to unpack BatchWithdrawERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -113,12 +122,17 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.L2SentMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "SentMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack SentMessage event", "err", err)
|
||||
log.Error("Failed to unpack SentMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
from, err := getRealFromAddress(ctx, event.Sender, event.Message, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
|
||||
if err != nil {
|
||||
log.Error("Failed to get real 'from' address", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2WithdrawMessages = append(l2WithdrawMessages, &orm.CrossMessage{
|
||||
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
|
||||
Sender: event.Sender.String(),
|
||||
Sender: from,
|
||||
Receiver: event.Target.String(),
|
||||
TokenType: int(orm.TokenTypeETH),
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
@@ -137,7 +151,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.L2RelayedMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "RelayedMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack RelayedMessage event", "err", err)
|
||||
log.Error("Failed to unpack RelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
|
||||
@@ -151,7 +165,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.L2RelayedMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "FailedRelayedMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
|
||||
|
||||
@@ -33,9 +33,10 @@ type L2FilterResult struct {
|
||||
|
||||
// L2FetcherLogic the L2 fetcher logic
|
||||
type L2FetcherLogic struct {
|
||||
cfg *config.LayerConfig
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
addressList []common.Address
|
||||
gatewayList []common.Address
|
||||
parser *L2EventParser
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
@@ -45,7 +46,7 @@ type L2FetcherLogic struct {
|
||||
}
|
||||
|
||||
// NewL2FetcherLogic create L2 fetcher logic
|
||||
func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
|
||||
func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
|
||||
addressList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
@@ -60,16 +61,34 @@ func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
common.HexToAddress(cfg.MessengerAddr),
|
||||
}
|
||||
|
||||
gatewayList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.StandardERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.CustomERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.WETHGatewayAddr),
|
||||
common.HexToAddress(cfg.DAIGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.ERC721GatewayAddr),
|
||||
common.HexToAddress(cfg.ERC1155GatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.MessengerAddr),
|
||||
|
||||
common.HexToAddress(cfg.GatewayRouterAddr),
|
||||
}
|
||||
|
||||
// Optional erc20 gateways.
|
||||
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
}
|
||||
|
||||
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList)
|
||||
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
|
||||
|
||||
f := &L2FetcherLogic{
|
||||
db: db,
|
||||
@@ -78,7 +97,8 @@ func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
addressList: addressList,
|
||||
parser: NewL2EventParser(),
|
||||
gatewayList: gatewayList,
|
||||
parser: NewL2EventParser(cfg, client),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
@@ -90,8 +110,8 @@ func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.BlockWithRowConsumption, error) {
|
||||
blocks, err := utils.GetL2BlocksInRange(ctx, f.client, from, to)
|
||||
func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to uint64, lastBlockHash common.Hash) (bool, uint64, common.Hash, []*types.Block, error) {
|
||||
blocks, err := utils.GetBlocksInRange(ctx, f.client, from, to)
|
||||
if err != nil {
|
||||
log.Error("failed to get L2 blocks in range", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
@@ -117,7 +137,7 @@ func (f *L2FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to u
|
||||
return false, 0, lastBlockHash, blocks, nil
|
||||
}
|
||||
|
||||
func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.BlockWithRowConsumption) (map[uint64]uint64, []*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l2RevertedUserTxs []*orm.CrossMessage
|
||||
var l2RevertedRelayedMessageTxs []*orm.CrossMessage
|
||||
blockTimestampsMap := make(map[uint64]uint64)
|
||||
@@ -127,42 +147,7 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
blockTimestampsMap[block.NumberU64()] = block.Time()
|
||||
|
||||
for _, tx := range block.Transactions() {
|
||||
txTo := tx.To()
|
||||
if txTo == nil {
|
||||
continue
|
||||
}
|
||||
toAddress := txTo.String()
|
||||
|
||||
// GatewayRouter: L2 withdrawal.
|
||||
if toAddress == f.cfg.GatewayRouterAddr {
|
||||
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
|
||||
if receiptErr != nil {
|
||||
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
|
||||
return nil, nil, nil, receiptErr
|
||||
}
|
||||
|
||||
// Check if the transaction is failed
|
||||
if receipt.Status == types.ReceiptStatusFailed {
|
||||
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
|
||||
sender, signerErr := signer.Sender(tx)
|
||||
if signerErr != nil {
|
||||
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
|
||||
return nil, nil, nil, signerErr
|
||||
}
|
||||
|
||||
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
|
||||
L2TxHash: tx.Hash().String(),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
Sender: sender.String(),
|
||||
Receiver: (*tx.To()).String(),
|
||||
L2BlockNumber: receipt.BlockNumber.Uint64(),
|
||||
BlockTimestamp: block.Time(),
|
||||
TxStatus: int(orm.TxStatusTypeSentTxReverted),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if tx.Type() == types.L1MessageTxType {
|
||||
if tx.IsL1MessageTx() {
|
||||
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
|
||||
if receiptErr != nil {
|
||||
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
|
||||
@@ -179,6 +164,38 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Gateways: L2 withdrawal.
|
||||
if !isTransactionToGateway(tx, f.gatewayList) {
|
||||
continue
|
||||
}
|
||||
|
||||
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
|
||||
if receiptErr != nil {
|
||||
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
|
||||
return nil, nil, nil, receiptErr
|
||||
}
|
||||
|
||||
// Check if the transaction is failed
|
||||
if receipt.Status == types.ReceiptStatusFailed {
|
||||
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
|
||||
sender, signerErr := signer.Sender(tx)
|
||||
if signerErr != nil {
|
||||
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
|
||||
return nil, nil, nil, signerErr
|
||||
}
|
||||
|
||||
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
|
||||
L2TxHash: tx.Hash().String(),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
Sender: sender.String(),
|
||||
Receiver: (*tx.To()).String(),
|
||||
L2BlockNumber: receipt.BlockNumber.Uint64(),
|
||||
BlockTimestamp: block.Time(),
|
||||
TxStatus: int(orm.TxStatusTypeSentTxReverted),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -235,7 +252,7 @@ func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(eventLogs, blockTimestampsMap)
|
||||
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
@@ -279,3 +296,15 @@ func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isTransactionToGateway(tx *types.Transaction, gatewayList []common.Address) bool {
|
||||
if tx.To() == nil {
|
||||
return false
|
||||
}
|
||||
for _, gateway := range gatewayList {
|
||||
if *tx.To() == gateway {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// BatchStatusType represents the type of batch status.
|
||||
@@ -89,19 +90,21 @@ func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, block
|
||||
}
|
||||
|
||||
// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
|
||||
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent, dbTX ...*gorm.DB) error {
|
||||
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent) error {
|
||||
for _, l1BatchEvent := range l1BatchEvents {
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&BatchEvent{})
|
||||
updateFields := make(map[string]interface{})
|
||||
switch BatchStatusType(l1BatchEvent.BatchStatus) {
|
||||
case BatchStatusTypeCommitted:
|
||||
// Use the clause to either insert or ignore on conflict
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "batch_hash"}},
|
||||
DoNothing: true,
|
||||
})
|
||||
if err := db.Create(l1BatchEvent).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert batch event, error: %w", err)
|
||||
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
|
||||
}
|
||||
case BatchStatusTypeFinalized:
|
||||
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
@@ -47,9 +46,9 @@ const (
|
||||
TxStatusTypeSent TxStatusType = iota
|
||||
TxStatusTypeSentTxReverted // Not track message hash, thus will not be processed again anymore.
|
||||
TxStatusTypeRelayed // Terminal status.
|
||||
// FailedRelayedMessage event: encoded tx failed, cannot retry. e.g., https://sepolia.scrollscan.com/tx/0xfc7d3ea5ec8dc9b664a5a886c3b33d21e665355057601033481a439498efb79a
|
||||
TxStatusTypeFailedRelayed // Terminal status.
|
||||
// In some cases, user can retry with a larger gas limit. e.g., https://sepolia.scrollscan.com/tx/0x7323a7ba29492cb47d92206411be99b27896f2823cee0633a596b646b73f1b5b
|
||||
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
|
||||
TxStatusTypeFailedRelayed
|
||||
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
|
||||
TxStatusTypeRelayTxReverted
|
||||
TxStatusTypeSkipped
|
||||
TxStatusTypeDropped // Terminal status.
|
||||
@@ -254,38 +253,27 @@ func (c *CrossMessage) GetTxsByAddress(ctx context.Context, sender string) ([]*C
|
||||
}
|
||||
|
||||
// UpdateL1MessageQueueEventsInfo updates the information about L1 message queue events in the database.
|
||||
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent) error {
|
||||
// update tx statuses.
|
||||
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeFailedRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
txStatusUpdateFields := make(map[string]interface{})
|
||||
switch l1MessageQueueEvent.EventType {
|
||||
case MessageQueueEventTypeQueueTransaction:
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sentMessages have been filtered out.
|
||||
// replayMessage case:
|
||||
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
|
||||
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
|
||||
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
|
||||
//
|
||||
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
|
||||
// because in replayMessage, queue index != message nonce.
|
||||
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
|
||||
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeSent // reset status to "sent".
|
||||
continue
|
||||
case MessageQueueEventTypeDequeueTransaction:
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
|
||||
case MessageQueueEventTypeDropTransaction:
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
|
||||
@@ -298,15 +286,22 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
|
||||
// update tx hashes of replay and refund.
|
||||
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
txHashUpdateFields := make(map[string]interface{})
|
||||
switch l1MessageQueueEvent.EventType {
|
||||
case MessageQueueEventTypeDequeueTransaction:
|
||||
continue
|
||||
case MessageQueueEventTypeQueueTransaction:
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sentMessages have been filtered out.
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sendMessages have been filtered out.
|
||||
// replayMessage case:
|
||||
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
|
||||
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
|
||||
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
|
||||
//
|
||||
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
|
||||
// because in replayMessage, queue index != message nonce.
|
||||
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
|
||||
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
|
||||
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
|
||||
case MessageQueueEventTypeDropTransaction:
|
||||
@@ -314,11 +309,8 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
|
||||
}
|
||||
// Check if there are fields to update to avoid empty update operation (skip message).
|
||||
if len(txHashUpdateFields) > 0 {
|
||||
if err := db.Updates(txHashUpdateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
|
||||
}
|
||||
if err := db.Updates(txHashUpdateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -362,14 +354,11 @@ func (c *CrossMessage) UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx c
|
||||
}
|
||||
|
||||
// InsertOrUpdateL1Messages inserts or updates a list of L1 cross messages into the database.
|
||||
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
|
||||
@@ -384,18 +373,14 @@ func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []
|
||||
}
|
||||
|
||||
// InsertOrUpdateL2Messages inserts or updates a list of L2 cross messages into the database.
|
||||
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
|
||||
// The merkle_proof is updated separately in batch status updates and hence is not included here.
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l2_block_number", "l2_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_from", "message_to", "message_value", "message_data", "message_nonce"}),
|
||||
@@ -406,31 +391,60 @@ func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertFailedGatewayRouterTxs inserts a list of transactions that failed to interact with the gateway router into the database.
|
||||
// These failed transactions are only fetched once, so they are inserted without checking for duplicates.
|
||||
// To resolve unique index confliction, a random UUID will be generated and used as the MessageHash.
|
||||
func (c *CrossMessage) InsertFailedGatewayRouterTxs(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
// InsertFailedL2GatewayTxs inserts a list of transactions that failed to interact with the L2 gateways into the database.
|
||||
// To resolve unique index confliction, L2 tx hash is used as the MessageHash.
|
||||
// The OnConflict clause is used to prevent inserting same failed transactions multiple times.
|
||||
func (c *CrossMessage) InsertFailedL2GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
|
||||
for _, message := range messages {
|
||||
message.MessageHash = message.L2TxHash
|
||||
}
|
||||
|
||||
db := c.db
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
for _, message := range messages {
|
||||
message.MessageHash = uuid.New().String()
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoNothing: true,
|
||||
})
|
||||
|
||||
if err := db.Create(&messages).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
|
||||
}
|
||||
if err := db.Create(messages).Error; err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertFailedL1GatewayTxs inserts a list of transactions that failed to interact with the L1 gateways into the database.
|
||||
// To resolve unique index confliction, L1 tx hash is used as the MessageHash.
|
||||
// The OnConflict clause is used to prevent inserting same failed transactions multiple times.
|
||||
func (c *CrossMessage) InsertFailedL1GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, message := range messages {
|
||||
message.MessageHash = message.L1TxHash
|
||||
}
|
||||
|
||||
db := c.db
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoNothing: true,
|
||||
})
|
||||
|
||||
if err := db.Create(&messages).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertOrUpdateL2RelayedMessagesOfL1Deposits inserts or updates the database with a list of L2 relayed messages related to L1 deposits.
|
||||
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage) error {
|
||||
if len(l2RelayedMessages) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -459,7 +473,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
|
||||
for _, msg := range mergedL2RelayedMessages {
|
||||
uniqueL2RelayedMessages = append(uniqueL2RelayedMessages, msg)
|
||||
}
|
||||
// Do not update tx status of successfully or failed relayed messages,
|
||||
// Do not update tx status of successfully relayed messages,
|
||||
// because if a message is handled, the later relayed message tx would be reverted.
|
||||
// ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L2/L2ScrollMessenger.sol#L102
|
||||
// e.g.,
|
||||
@@ -476,7 +490,6 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
|
||||
clause.And(
|
||||
// do not over-write terminal statuses.
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
|
||||
),
|
||||
},
|
||||
@@ -489,7 +502,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
|
||||
}
|
||||
|
||||
// InsertOrUpdateL1RelayedMessagesOfL2Withdrawals inserts or updates the database with a list of L1 relayed messages related to L2 withdrawals.
|
||||
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage) error {
|
||||
if len(l1RelayedMessages) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -519,9 +532,6 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
|
||||
uniqueL1RelayedMessages = append(uniqueL1RelayedMessages, msg)
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
@@ -532,7 +542,6 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
|
||||
clause.And(
|
||||
// do not over-write terminal statuses.
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
|
||||
),
|
||||
},
|
||||
|
||||
@@ -15,6 +15,7 @@ CREATE TABLE batch_event_v2
|
||||
deleted_at TIMESTAMP(0) DEFAULT NULL
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS unique_idx_be_batch_hash ON batch_event_v2 (batch_hash);
|
||||
CREATE INDEX IF NOT EXISTS idx_be_l1_block_number ON batch_event_v2 (l1_block_number);
|
||||
CREATE INDEX IF NOT EXISTS idx_be_batch_index ON batch_event_v2 (batch_index);
|
||||
CREATE INDEX IF NOT EXISTS idx_be_batch_index_batch_hash ON batch_event_v2 (batch_index, batch_hash);
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
@@ -116,8 +115,8 @@ func GetBatchRangeFromCalldata(calldata []byte) (uint64, uint64, error) {
|
||||
return startBlock, finishBlock, err
|
||||
}
|
||||
|
||||
// GetL1BlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
|
||||
func GetL1BlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.Block, error) {
|
||||
// GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
|
||||
func GetBlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.Block, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
blocks = make([]*types.Block, end-start+1)
|
||||
@@ -148,38 +147,6 @@ func GetL1BlocksInRange(ctx context.Context, cli *ethclient.Client, start, end u
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// GetL2BlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
|
||||
func GetL2BlocksInRange(ctx context.Context, cli *ethclient.Client, start, end uint64) ([]*types.BlockWithRowConsumption, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
blocks = make([]*types.BlockWithRowConsumption, end-start+1)
|
||||
concurrency = 32
|
||||
sem = make(chan struct{}, concurrency)
|
||||
)
|
||||
|
||||
for i := start; i <= end; i++ {
|
||||
sem <- struct{}{} // Acquire a slot in the semaphore
|
||||
blockNum := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(i))
|
||||
index := i - start
|
||||
eg.Go(func() error {
|
||||
defer func() { <-sem }() // Release the slot when done
|
||||
block, err := cli.GetBlockByNumberOrHash(ctx, blockNum)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch block number", "number", blockNum, "error", err)
|
||||
return err
|
||||
}
|
||||
blocks[index] = block
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
log.Error("Error waiting for block fetching routines", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// ConvertBigIntArrayToString convert the big int array to string
|
||||
func ConvertBigIntArrayToString(array []*big.Int) string {
|
||||
stringArray := make([]string, len(array))
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Start from the latest golang base image
|
||||
FROM golang:1.20
|
||||
FROM golang:1.21
|
||||
|
||||
# Install Docker
|
||||
RUN apt-get update && apt-get install -y docker.io
|
||||
RUN apt-get update && apt-get install -y docker.io docker-compose
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /go/src/app
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -uex
|
||||
|
||||
profile_name=$1
|
||||
|
||||
exclude_dirs=("scroll-tech/rollup/cmd" "scroll-tech/rollup/tests" "scroll-tech/rollup/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")
|
||||
|
||||
all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
|
||||
coverpkg="scroll-tech/${profile_name}"
|
||||
|
||||
for pkg in $all_packages; do
|
||||
exclude_pkg=false
|
||||
for exclude_dir in "${exclude_dirs[@]}"; do
|
||||
if [[ $pkg == $exclude_dir* ]]; then
|
||||
exclude_pkg=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$exclude_pkg" = false ]; then
|
||||
coverpkg="$coverpkg,$pkg/..."
|
||||
fi
|
||||
done
|
||||
|
||||
echo "coverage.${profile_name}.txt"
|
||||
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="$coverpkg" -coverprofile=../coverage.${profile_name}.txt -covermode=atomic ./...
|
||||
7
common/docker-compose/l1/.gitignore
vendored
Normal file
7
common/docker-compose/l1/.gitignore
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
consensus/beacondata*
|
||||
consensus/genesis.ssz
|
||||
consensus/validatordata*
|
||||
consensus/data*
|
||||
execution/geth
|
||||
execution/geth.ipc
|
||||
execution/data*
|
||||
24
common/docker-compose/l1/clean.sh
Executable file
24
common/docker-compose/l1/clean.sh
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Loop until no Docker containers matching 'posl1' are found
|
||||
while : ; do
|
||||
containers=$(docker ps -a --format '{{.Names}}' | grep posl1)
|
||||
if [[ -z "$containers" ]]; then
|
||||
break
|
||||
fi
|
||||
echo "find the following containers, removing..."
|
||||
echo "$containers"
|
||||
echo "$containers" | xargs -r docker stop
|
||||
echo "$containers" | xargs -r docker rm -f || echo "Warning: Failed to remove some containers."
|
||||
done
|
||||
|
||||
# Loop until no Docker networks matching 'posl1' are found
|
||||
while : ; do
|
||||
networks=$(docker network ls --format '{{.ID}} {{.Name}}' | grep posl1 | awk '{print $1}')
|
||||
if [[ -z "$networks" ]]; then
|
||||
break
|
||||
fi
|
||||
echo "find the following networks, removing..."
|
||||
echo "$networks"
|
||||
echo "$networks" | xargs -r docker network rm || echo "Warning: Failed to remove some networks."
|
||||
done
|
||||
30
common/docker-compose/l1/consensus/config.yml
Normal file
30
common/docker-compose/l1/consensus/config.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
CONFIG_NAME: interop
|
||||
PRESET_BASE: interop
|
||||
|
||||
# Genesis
|
||||
GENESIS_FORK_VERSION: 0x20000089
|
||||
|
||||
# Altair
|
||||
ALTAIR_FORK_EPOCH: 0
|
||||
ALTAIR_FORK_VERSION: 0x20000090
|
||||
|
||||
# Merge
|
||||
BELLATRIX_FORK_EPOCH: 0
|
||||
BELLATRIX_FORK_VERSION: 0x20000091
|
||||
TERMINAL_TOTAL_DIFFICULTY: 0
|
||||
|
||||
# Capella
|
||||
CAPELLA_FORK_EPOCH: 0
|
||||
CAPELLA_FORK_VERSION: 0x20000092
|
||||
MAX_WITHDRAWALS_PER_PAYLOAD: 16
|
||||
|
||||
# Deneb
|
||||
DENEB_FORK_EPOCH: 0
|
||||
DENEB_FORK_VERSION: 0x20000093
|
||||
|
||||
# Time parameters
|
||||
SECONDS_PER_SLOT: 2
|
||||
SLOTS_PER_EPOCH: 2
|
||||
|
||||
# Deposit contract
|
||||
DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242
|
||||
127
common/docker-compose/l1/docker-compose.go
Normal file
127
common/docker-compose/l1/docker-compose.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package dockercompose
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/log"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
tc "github.com/testcontainers/testcontainers-go/modules/compose"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
)
|
||||
|
||||
// PoSL1TestEnv represents the config needed to test in PoS Layer 1.
|
||||
type PoSL1TestEnv struct {
|
||||
dockerComposeFile string
|
||||
compose tc.ComposeStack
|
||||
gethHTTPPort int
|
||||
hostPath string
|
||||
}
|
||||
|
||||
// NewPoSL1TestEnv creates and initializes a new instance of PoSL1TestEnv with a random HTTP port.
|
||||
func NewPoSL1TestEnv() (*PoSL1TestEnv, error) {
|
||||
rootDir, err := findProjectRootDir()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find project root directory: %v", err)
|
||||
}
|
||||
|
||||
hostPath, found := os.LookupEnv("HOST_PATH")
|
||||
if !found {
|
||||
hostPath = ""
|
||||
}
|
||||
|
||||
rnd, err := rand.Int(rand.Reader, big.NewInt(65536-1024))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate a random: %v", err)
|
||||
}
|
||||
gethHTTPPort := int(rnd.Int64()) + 1024
|
||||
|
||||
if err := os.Setenv("GETH_HTTP_PORT", fmt.Sprintf("%d", gethHTTPPort)); err != nil {
|
||||
return nil, fmt.Errorf("failed to set GETH_HTTP_PORT: %v", err)
|
||||
}
|
||||
|
||||
return &PoSL1TestEnv{
|
||||
dockerComposeFile: filepath.Join(rootDir, "common", "docker-compose", "l1", "docker-compose.yml"),
|
||||
gethHTTPPort: gethHTTPPort,
|
||||
hostPath: hostPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start starts the PoS L1 test environment by running the associated Docker Compose configuration.
|
||||
func (e *PoSL1TestEnv) Start() error {
|
||||
var err error
|
||||
e.compose, err = tc.NewDockerCompose([]string{e.dockerComposeFile}...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create docker compose: %w", err)
|
||||
}
|
||||
|
||||
env := map[string]string{
|
||||
"GETH_HTTP_PORT": fmt.Sprintf("%d", e.gethHTTPPort),
|
||||
}
|
||||
|
||||
if e.hostPath != "" {
|
||||
env["HOST_PATH"] = e.hostPath
|
||||
}
|
||||
|
||||
if err = e.compose.WaitForService("geth", wait.NewHTTPStrategy("/").WithPort("8545/tcp").WithStartupTimeout(15*time.Second)).WithEnv(env).Up(context.Background()); err != nil {
|
||||
if errStop := e.Stop(); errStop != nil {
|
||||
log.Error("failed to stop PoS L1 test environment", "err", errStop)
|
||||
}
|
||||
return fmt.Errorf("failed to start PoS L1 test environment: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the PoS L1 test environment by stopping and removing the associated Docker Compose services.
|
||||
func (e *PoSL1TestEnv) Stop() error {
|
||||
if e.compose != nil {
|
||||
if err := e.compose.Down(context.Background(), tc.RemoveOrphans(true), tc.RemoveVolumes(true), tc.RemoveImagesLocal); err != nil {
|
||||
return fmt.Errorf("failed to stop PoS L1 test environment: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Endpoint returns the HTTP endpoint for the PoS L1 test environment.
|
||||
func (e *PoSL1TestEnv) Endpoint() string {
|
||||
return fmt.Sprintf("http://127.0.0.1:%d", e.gethHTTPPort)
|
||||
}
|
||||
|
||||
// L1Client returns an ethclient by dialing the running PoS L1 test environment
|
||||
func (e *PoSL1TestEnv) L1Client() (*ethclient.Client, error) {
|
||||
if e == nil {
|
||||
return nil, fmt.Errorf("PoS L1 test environment is not initialized")
|
||||
}
|
||||
|
||||
client, err := ethclient.Dial(e.Endpoint())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to dial PoS L1 test environment: %w", err)
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func findProjectRootDir() (string, error) {
|
||||
currentDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get working directory: %w", err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
|
||||
if err == nil {
|
||||
return currentDir, nil
|
||||
}
|
||||
|
||||
parentDir := filepath.Dir(currentDir)
|
||||
if parentDir == currentDir {
|
||||
return "", fmt.Errorf("go.work file not found in any parent directory")
|
||||
}
|
||||
|
||||
currentDir = parentDir
|
||||
}
|
||||
}
|
||||
127
common/docker-compose/l1/docker-compose.yml
Normal file
127
common/docker-compose/l1/docker-compose.yml
Normal file
@@ -0,0 +1,127 @@
|
||||
version: "3.9"
|
||||
services:
|
||||
initialize-env:
|
||||
image: "alpine:3.19.0"
|
||||
command:
|
||||
/bin/sh -c "mkdir -p /data/consensus &&
|
||||
cp -a /consensus/* /data/consensus/ &&
|
||||
mkdir -p /data/execution &&
|
||||
cp -a /execution/* /data/execution/"
|
||||
volumes:
|
||||
- ${HOST_PATH:-../../..}/common/docker-compose/l1/consensus:/consensus
|
||||
- ${HOST_PATH:-../../..}/common/docker-compose/l1/execution:/execution
|
||||
- data:/data
|
||||
|
||||
# Creates a genesis state for the beacon chain using a YAML configuration file and
|
||||
# a deterministic set of 64 validators.
|
||||
create-beacon-chain-genesis:
|
||||
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest"
|
||||
command:
|
||||
- testnet
|
||||
- generate-genesis
|
||||
- --fork=deneb
|
||||
- --num-validators=64
|
||||
- --genesis-time-delay=3
|
||||
- --output-ssz=/data/consensus/genesis.ssz
|
||||
- --chain-config-file=/data/consensus/config.yml
|
||||
- --geth-genesis-json-in=/data/execution/genesis.json
|
||||
- --geth-genesis-json-out=/data/execution/genesis.json
|
||||
volumes:
|
||||
- data:/data
|
||||
depends_on:
|
||||
initialize-env:
|
||||
condition: service_completed_successfully
|
||||
|
||||
# Sets up the genesis configuration for the go-ethereum client from a JSON file.
|
||||
geth-genesis:
|
||||
image: "ethereum/client-go:v1.13.14"
|
||||
command: --datadir=/data/execution init /data/execution/genesis.json
|
||||
volumes:
|
||||
- data:/data
|
||||
depends_on:
|
||||
create-beacon-chain-genesis:
|
||||
condition: service_completed_successfully
|
||||
initialize-env:
|
||||
condition: service_completed_successfully
|
||||
|
||||
# Runs a Prysm beacon chain from a specified genesis state created in the previous step
|
||||
# and connects to go-ethereum in the same network as the execution client.
|
||||
# The account used in go-ethereum is set as the suggested fee recipient for transactions
|
||||
# proposed via the validators attached to the beacon node.
|
||||
beacon-chain:
|
||||
image: "gcr.io/prysmaticlabs/prysm/beacon-chain:v5.0.0"
|
||||
command:
|
||||
- --datadir=/data/consensus/beacondata
|
||||
# No peers to sync with in this testnet, so setting to 0
|
||||
- --min-sync-peers=0
|
||||
- --genesis-state=/data/consensus/genesis.ssz
|
||||
- --bootstrap-node=
|
||||
- --interop-eth1data-votes
|
||||
# The chain configuration file used for setting up Prysm
|
||||
- --chain-config-file=/data/consensus/config.yml
|
||||
# We specify the chain id used by our execution client
|
||||
- --contract-deployment-block=0
|
||||
- --chain-id=${CHAIN_ID:-32382}
|
||||
- --rpc-host=0.0.0.0
|
||||
- --grpc-gateway-host=0.0.0.0
|
||||
- --execution-endpoint=http://geth:8551
|
||||
- --accept-terms-of-use
|
||||
- --jwt-secret=/data/execution/jwtsecret
|
||||
- --suggested-fee-recipient=0x123463a4b065722e99115d6c222f267d9cabb524
|
||||
- --minimum-peers-per-subnet=0
|
||||
- --enable-debug-rpc-endpoints
|
||||
- --force-clear-db
|
||||
depends_on:
|
||||
create-beacon-chain-genesis:
|
||||
condition: service_completed_successfully
|
||||
volumes:
|
||||
- data:/data
|
||||
|
||||
# Runs the go-ethereum execution client with the specified, unlocked account and necessary
|
||||
# APIs to allow for proof-of-stake consensus via Prysm.
|
||||
geth:
|
||||
image: "ethereum/client-go:v1.13.14"
|
||||
command:
|
||||
- --http
|
||||
- --http.api=eth,net,web3
|
||||
- --http.addr=0.0.0.0
|
||||
- --http.corsdomain=*
|
||||
- --authrpc.vhosts=*
|
||||
- --authrpc.addr=0.0.0.0
|
||||
- --authrpc.jwtsecret=/data/execution/jwtsecret
|
||||
- --datadir=/data/execution
|
||||
- --allow-insecure-unlock
|
||||
- --unlock=0x123463a4b065722e99115d6c222f267d9cabb524
|
||||
- --password=/data/execution/geth_password.txt
|
||||
- --nodiscover
|
||||
- --syncmode=full
|
||||
ports:
|
||||
- ${GETH_HTTP_PORT:-8545}:8545
|
||||
depends_on:
|
||||
geth-genesis:
|
||||
condition: service_completed_successfully
|
||||
beacon-chain:
|
||||
condition: service_started
|
||||
volumes:
|
||||
- data:/data
|
||||
|
||||
# We run a validator client with 64, deterministically-generated keys that match
|
||||
# The validator keys present in the beacon chain genesis state generated a few steps above.
|
||||
validator:
|
||||
image: "gcr.io/prysmaticlabs/prysm/validator:v5.0.0"
|
||||
command:
|
||||
- --beacon-rpc-provider=beacon-chain:4000
|
||||
- --datadir=/data/consensus/validatordata
|
||||
- --accept-terms-of-use
|
||||
- --interop-num-validators=64
|
||||
- --interop-start-index=0
|
||||
- --chain-config-file=/data/consensus/config.yml
|
||||
- --force-clear-db
|
||||
depends_on:
|
||||
beacon-chain:
|
||||
condition: service_started
|
||||
volumes:
|
||||
- data:/data
|
||||
|
||||
volumes:
|
||||
data:
|
||||
57
common/docker-compose/l1/execution/genesis.json
Normal file
57
common/docker-compose/l1/execution/genesis.json
Normal file
File diff suppressed because one or more lines are too long
1
common/docker-compose/l1/execution/jwtsecret
Normal file
1
common/docker-compose/l1/execution/jwtsecret
Normal file
@@ -0,0 +1 @@
|
||||
0xfad2709d0bb03bf0e8ba3c99bea194575d3e98863133d1af638ed056d1d59345
|
||||
@@ -0,0 +1 @@
|
||||
{"address":"123463a4b065722e99115d6c222f267d9cabb524","crypto":{"cipher":"aes-128-ctr","ciphertext":"93b90389b855889b9f91c89fd15b9bd2ae95b06fe8e2314009fc88859fc6fde9","cipherparams":{"iv":"9dc2eff7967505f0e6a40264d1511742"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"c07503bb1b66083c37527cd8f06f8c7c1443d4c724767f625743bd47ae6179a4"},"mac":"6d359be5d6c432d5bbb859484009a4bf1bd71b76e89420c380bd0593ce25a817"},"id":"622df904-0bb1-4236-b254-f1b8dfdff1ec","version":3}
|
||||
@@ -48,7 +48,7 @@ type App struct {
|
||||
Timestamp int
|
||||
}
|
||||
|
||||
// NewDockerApp returns new instance of dokerApp struct
|
||||
// NewDockerApp returns new instance of dockerApp struct
|
||||
func NewDockerApp() *App {
|
||||
timestamp := time.Now().Nanosecond()
|
||||
app := &App{
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
@@ -75,7 +74,7 @@ func (i *ImgDB) Stop() error {
|
||||
return err
|
||||
}
|
||||
// remove the stopped container.
|
||||
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
|
||||
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
|
||||
}
|
||||
|
||||
// Endpoint return the dsn.
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
|
||||
@@ -146,7 +145,7 @@ func (i *ImgGeth) Stop() error {
|
||||
i.id = id
|
||||
}
|
||||
// remove the stopped container.
|
||||
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
|
||||
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
|
||||
}
|
||||
|
||||
func (i *ImgGeth) params() []string {
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
@@ -40,7 +40,7 @@ type GethImgInstance interface {
|
||||
func GetContainerID(name string) string {
|
||||
filter := filters.NewArgs()
|
||||
filter.Add("name", name)
|
||||
lst, _ := cli.ContainerList(context.Background(), types.ContainerListOptions{
|
||||
lst, _ := cli.ContainerList(context.Background(), container.ListOptions{
|
||||
Filters: filter,
|
||||
})
|
||||
if len(lst) > 0 {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM ethereum/client-go
|
||||
FROM ethereum/client-go:v1.13.14
|
||||
|
||||
COPY password /l1geth/
|
||||
COPY genesis.json /l1geth/
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
"archimedesBlock": 0,
|
||||
"shanghaiBlock": 0,
|
||||
"clique": {
|
||||
"period": 3,
|
||||
"period": 1,
|
||||
"epoch": 30000
|
||||
},
|
||||
"scroll": {
|
||||
|
||||
89
common/forks/forks.go
Normal file
89
common/forks/forks.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package forks
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
)
|
||||
|
||||
// CollectSortedForkHeights returns a sorted set of block numbers that one or more forks are activated on
|
||||
func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool, map[string]uint64) {
|
||||
type nameFork struct {
|
||||
name string
|
||||
block *big.Int
|
||||
}
|
||||
|
||||
forkHeightNameMap := make(map[uint64]string)
|
||||
|
||||
for _, fork := range []nameFork{
|
||||
{name: "homestead", block: config.HomesteadBlock},
|
||||
{name: "daoFork", block: config.DAOForkBlock},
|
||||
{name: "eip150", block: config.EIP150Block},
|
||||
{name: "eip155", block: config.EIP155Block},
|
||||
{name: "eip158", block: config.EIP158Block},
|
||||
{name: "byzantium", block: config.ByzantiumBlock},
|
||||
{name: "constantinople", block: config.ConstantinopleBlock},
|
||||
{name: "petersburg", block: config.PetersburgBlock},
|
||||
{name: "istanbul", block: config.IstanbulBlock},
|
||||
{name: "muirGlacier", block: config.MuirGlacierBlock},
|
||||
{name: "berlin", block: config.BerlinBlock},
|
||||
{name: "london", block: config.LondonBlock},
|
||||
{name: "arrowGlacier", block: config.ArrowGlacierBlock},
|
||||
{name: "archimedes", block: config.ArchimedesBlock},
|
||||
{name: "shanghai", block: config.ShanghaiBlock},
|
||||
{name: "bernoulli", block: config.BernoulliBlock},
|
||||
{name: "curie", block: config.CurieBlock},
|
||||
} {
|
||||
if fork.block == nil {
|
||||
continue
|
||||
}
|
||||
height := fork.block.Uint64()
|
||||
|
||||
// only keep latest fork for at each height, discard the rest
|
||||
forkHeightNameMap[height] = fork.name
|
||||
}
|
||||
|
||||
forkHeightsMap := make(map[uint64]bool)
|
||||
forkNameHeightMap := make(map[string]uint64)
|
||||
|
||||
for height, name := range forkHeightNameMap {
|
||||
forkHeightsMap[height] = true
|
||||
forkNameHeightMap[name] = height
|
||||
}
|
||||
|
||||
var forkHeights []uint64
|
||||
for height := range forkHeightsMap {
|
||||
forkHeights = append(forkHeights, height)
|
||||
}
|
||||
sort.Slice(forkHeights, func(i, j int) bool {
|
||||
return forkHeights[i] < forkHeights[j]
|
||||
})
|
||||
return forkHeights, forkHeightsMap, forkNameHeightMap
|
||||
}
|
||||
|
||||
// BlocksUntilFork returns the number of blocks until the next fork
|
||||
// returns 0 if there is no fork scheduled for the future
|
||||
func BlocksUntilFork(blockHeight uint64, forkHeights []uint64) uint64 {
|
||||
for _, forkHeight := range forkHeights {
|
||||
if forkHeight > blockHeight {
|
||||
return forkHeight - blockHeight
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// BlockRange returns the block range of the hard fork
|
||||
// Need ensure the forkHeights is incremental
|
||||
func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64) {
|
||||
to = math.MaxInt64
|
||||
for _, height := range forkHeights {
|
||||
if currentForkHeight < height {
|
||||
to = height
|
||||
return
|
||||
}
|
||||
from = height
|
||||
}
|
||||
return
|
||||
}
|
||||
142
common/forks/forks_test.go
Normal file
142
common/forks/forks_test.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package forks
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCollectSortedForkBlocks(t *testing.T) {
|
||||
l, m, n := CollectSortedForkHeights(¶ms.ChainConfig{
|
||||
ArchimedesBlock: big.NewInt(0),
|
||||
ShanghaiBlock: big.NewInt(3),
|
||||
BernoulliBlock: big.NewInt(3),
|
||||
CurieBlock: big.NewInt(4),
|
||||
})
|
||||
require.Equal(t, l, []uint64{
|
||||
0,
|
||||
3,
|
||||
4,
|
||||
})
|
||||
require.Equal(t, map[uint64]bool{
|
||||
3: true,
|
||||
4: true,
|
||||
0: true,
|
||||
}, m)
|
||||
require.Equal(t, map[string]uint64{
|
||||
"archimedes": 0,
|
||||
"bernoulli": 3,
|
||||
"curie": 4,
|
||||
}, n)
|
||||
}
|
||||
|
||||
func TestBlocksUntilFork(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
block uint64
|
||||
forks []uint64
|
||||
expected uint64
|
||||
}{
|
||||
"NoFork": {
|
||||
block: 44,
|
||||
forks: []uint64{},
|
||||
expected: 0,
|
||||
},
|
||||
"BeforeFork": {
|
||||
block: 0,
|
||||
forks: []uint64{1, 5},
|
||||
expected: 1,
|
||||
},
|
||||
"OnFork": {
|
||||
block: 1,
|
||||
forks: []uint64{1, 5},
|
||||
expected: 4,
|
||||
},
|
||||
"OnLastFork": {
|
||||
block: 5,
|
||||
forks: []uint64{1, 5},
|
||||
expected: 0,
|
||||
},
|
||||
"AfterFork": {
|
||||
block: 5,
|
||||
forks: []uint64{1, 5},
|
||||
expected: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
require.Equal(t, test.expected, BlocksUntilFork(test.block, test.forks))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
forkHeight uint64
|
||||
forkHeights []uint64
|
||||
expectedFrom uint64
|
||||
expectedTo uint64
|
||||
}{
|
||||
{
|
||||
name: "ToInfinite",
|
||||
forkHeight: 300,
|
||||
forkHeights: []uint64{100, 200, 300},
|
||||
expectedFrom: 300,
|
||||
expectedTo: math.MaxInt64,
|
||||
},
|
||||
{
|
||||
name: "To300",
|
||||
forkHeight: 200,
|
||||
forkHeights: []uint64{100, 200, 300},
|
||||
expectedFrom: 200,
|
||||
expectedTo: 300,
|
||||
},
|
||||
{
|
||||
name: "To200",
|
||||
forkHeight: 100,
|
||||
forkHeights: []uint64{100, 200, 300},
|
||||
expectedFrom: 100,
|
||||
expectedTo: 200,
|
||||
},
|
||||
{
|
||||
name: "To100",
|
||||
forkHeight: 0,
|
||||
forkHeights: []uint64{100, 200, 300},
|
||||
expectedFrom: 0,
|
||||
expectedTo: 100,
|
||||
},
|
||||
{
|
||||
name: "To200-1",
|
||||
forkHeight: 100,
|
||||
forkHeights: []uint64{100, 200},
|
||||
expectedFrom: 100,
|
||||
expectedTo: 200,
|
||||
},
|
||||
{
|
||||
name: "To2",
|
||||
forkHeight: 1,
|
||||
forkHeights: []uint64{1, 2},
|
||||
expectedFrom: 1,
|
||||
expectedTo: 2,
|
||||
},
|
||||
{
|
||||
name: "ToInfinite-1",
|
||||
forkHeight: 0,
|
||||
forkHeights: []uint64{0},
|
||||
expectedFrom: 0,
|
||||
expectedTo: math.MaxInt64,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
from, to := BlockRange(test.forkHeight, test.forkHeights)
|
||||
require.Equal(t, test.expectedFrom, from)
|
||||
require.Equal(t, test.expectedTo, to)
|
||||
})
|
||||
}
|
||||
}
|
||||
199
common/go.mod
199
common/go.mod
@@ -1,10 +1,12 @@
|
||||
module scroll-tech/common
|
||||
|
||||
go 1.20
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver/v3 v3.2.1
|
||||
github.com/bits-and-blooms/bitset v1.12.0
|
||||
github.com/docker/docker v24.0.7+incompatible
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004
|
||||
github.com/docker/docker v25.0.3+incompatible
|
||||
github.com/gin-contrib/pprof v1.4.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
@@ -13,48 +15,116 @@ require (
|
||||
github.com/mattn/go-isatty v0.0.20
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/prometheus/client_golang v1.16.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/testcontainers/testcontainers-go v0.29.1
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.29.1
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
gorm.io/driver/postgres v1.5.0
|
||||
gorm.io/gorm v1.25.5
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.0 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/Microsoft/hcsshim v0.11.4 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 // indirect
|
||||
github.com/aws/smithy-go v1.13.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/buger/goterm v1.0.4 // indirect
|
||||
github.com/bytedance/sonic v1.10.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
|
||||
github.com/chenzhuoyu/iasm v0.9.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/compose-spec/compose-go/v2 v2.0.0-rc.2 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1 // indirect
|
||||
github.com/containerd/console v1.0.3 // indirect
|
||||
github.com/containerd/containerd v1.7.12 // indirect
|
||||
github.com/containerd/continuity v0.4.2 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.1.1 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315 // indirect
|
||||
github.com/docker/cli v25.0.1+incompatible // indirect
|
||||
github.com/docker/compose/v2 v2.24.3 // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.0 // indirect
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fjl/memsize v0.0.2 // indirect
|
||||
github.com/fsnotify/fsevents v0.1.1 // indirect
|
||||
github.com/fvbommel/sortorder v1.0.2 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.15.5 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/gogo/googleapis v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-bexpr v0.1.10 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-version v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/in-toto/in-toto-golang v0.5.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.0 // indirect
|
||||
@@ -62,50 +132,137 @@ require (
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jonboulle/clockwork v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mattn/go-shellwords v1.0.12 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.1 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/sys/signal v0.7.0 // indirect
|
||||
github.com/moby/sys/symlink v0.2.0 // indirect
|
||||
github.com/moby/sys/user v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.8.1 // indirect
|
||||
github.com/onsi/gomega v1.27.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.39.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.7.1 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/cobra v1.8.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/theupdateframework/notary v0.7.0 // indirect
|
||||
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
|
||||
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
|
||||
go.opentelemetry.io/otel v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.24.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
golang.org/x/arch v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.16.0 // indirect
|
||||
golang.org/x/mod v0.14.0 // indirect
|
||||
golang.org/x/crypto v0.17.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
|
||||
golang.org/x/mod v0.16.0 // indirect
|
||||
golang.org/x/net v0.18.0 // indirect
|
||||
golang.org/x/sync v0.5.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/oauth2 v0.11.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.17.0 // indirect
|
||||
golang.org/x/term v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.15.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
|
||||
google.golang.org/grpc v1.59.0 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
gotest.tools/v3 v3.4.0 // indirect
|
||||
k8s.io/api v0.26.7 // indirect
|
||||
k8s.io/apimachinery v0.26.7 // indirect
|
||||
k8s.io/apiserver v0.26.7 // indirect
|
||||
k8s.io/client-go v0.26.7 // indirect
|
||||
k8s.io/klog/v2 v2.90.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
tags.cncf.io/container-device-interface v0.6.2 // indirect
|
||||
)
|
||||
|
||||
707
common/go.sum
707
common/go.sum
File diff suppressed because it is too large
Load Diff
22
common/libzkp/impl/Cargo.lock
generated
22
common/libzkp/impl/Cargo.lock
generated
@@ -31,7 +31,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "aggregator"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"ark-std",
|
||||
"env_logger 0.10.0",
|
||||
@@ -333,7 +333,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
|
||||
[[package]]
|
||||
name = "bus-mapping"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
@@ -959,7 +959,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "eth-types"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"ethers-core",
|
||||
"ethers-signers",
|
||||
@@ -1116,7 +1116,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "external-tracer"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"geth-utils",
|
||||
@@ -1296,7 +1296,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "gadgets"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"digest 0.7.6",
|
||||
"eth-types",
|
||||
@@ -1328,7 +1328,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "geth-utils"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"env_logger 0.9.3",
|
||||
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
|
||||
@@ -1937,7 +1937,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "keccak256"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"env_logger 0.9.3",
|
||||
"eth-types",
|
||||
@@ -2135,7 +2135,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "mock"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
@@ -2151,7 +2151,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "mpt-zktrie"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"halo2-mpt-circuits",
|
||||
@@ -2582,7 +2582,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "prover"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"aggregator",
|
||||
"anyhow",
|
||||
@@ -4125,7 +4125,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
|
||||
[[package]]
|
||||
name = "zkevm-circuits"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.8#8f17df87ba70f5a8fcaa23f4fcb7fb112f5a815a"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.9#79153a9eccd64e9bfa069e2ed7aba3bc7edb02fc"
|
||||
dependencies = [
|
||||
"array-init",
|
||||
"bus-mapping",
|
||||
|
||||
@@ -21,7 +21,7 @@ halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch =
|
||||
|
||||
[dependencies]
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
|
||||
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.8", default-features = false, features = ["parallel_syn", "scroll", "shanghai", "strict-ccc"] }
|
||||
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.9", default-features = false, features = ["parallel_syn", "scroll", "shanghai", "strict-ccc"] }
|
||||
|
||||
base64 = "0.13.0"
|
||||
env_logger = "0.9.0"
|
||||
|
||||
4
common/testdata/blockTrace_03.json
vendored
4
common/testdata/blockTrace_03.json
vendored
@@ -31,9 +31,11 @@
|
||||
{
|
||||
"type": 2,
|
||||
"nonce": 2,
|
||||
"txHash": "0x6b50040f5f14bad253f202b0775d6742131bcaee6b992f05578386f00e53b7e4",
|
||||
"txHash": "0xfc4325f39825616a241d0c52d536c892e21b6df32bb75d8bdc70de54ae175318",
|
||||
"gas": 1152994,
|
||||
"gasPrice": "0x3b9b0a17",
|
||||
"gasTipCap": "0x3b9b0a17",
|
||||
"gasFeeCap": "0x3b9b0a17",
|
||||
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"to": null,
|
||||
"chainId": "0xcf55",
|
||||
|
||||
995
common/testdata/blockTrace_delegate.json
vendored
995
common/testdata/blockTrace_delegate.json
vendored
File diff suppressed because one or more lines are too long
@@ -1,168 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// BatchMeta contains metadata of a batch.
|
||||
type BatchMeta struct {
|
||||
StartChunkIndex uint64
|
||||
StartChunkHash string
|
||||
EndChunkIndex uint64
|
||||
EndChunkHash string
|
||||
TotalL1CommitGas uint64
|
||||
TotalL1CommitCalldataSize uint32
|
||||
}
|
||||
|
||||
// BatchHeader contains batch header info to be committed.
|
||||
type BatchHeader struct {
|
||||
// Encoded in BatchHeaderV0Codec
|
||||
version uint8
|
||||
batchIndex uint64
|
||||
l1MessagePopped uint64
|
||||
totalL1MessagePopped uint64
|
||||
dataHash common.Hash
|
||||
parentBatchHash common.Hash
|
||||
skippedL1MessageBitmap []byte
|
||||
}
|
||||
|
||||
// NewBatchHeader creates a new BatchHeader
|
||||
func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64, parentBatchHash common.Hash, chunks []*Chunk) (*BatchHeader, error) {
|
||||
// buffer for storing chunk hashes in order to compute the batch data hash
|
||||
var dataBytes []byte
|
||||
|
||||
// skipped L1 message bitmap, an array of 256-bit bitmaps
|
||||
var skippedBitmap []*big.Int
|
||||
|
||||
// the first queue index that belongs to this batch
|
||||
baseIndex := totalL1MessagePoppedBefore
|
||||
|
||||
// the next queue index that we need to process
|
||||
nextIndex := totalL1MessagePoppedBefore
|
||||
|
||||
for chunkID, chunk := range chunks {
|
||||
// build data hash
|
||||
totalL1MessagePoppedBeforeChunk := nextIndex
|
||||
chunkHash, err := chunk.Hash(totalL1MessagePoppedBeforeChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dataBytes = append(dataBytes, chunkHash.Bytes()...)
|
||||
|
||||
// build skip bitmap
|
||||
for blockID, block := range chunk.Blocks {
|
||||
for _, tx := range block.Transactions {
|
||||
if tx.Type != types.L1MessageTxType {
|
||||
continue
|
||||
}
|
||||
currentIndex := tx.Nonce
|
||||
|
||||
if currentIndex < nextIndex {
|
||||
return nil, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batchIndex, chunkID, blockID, block.Header.Hash(), tx.TxHash)
|
||||
}
|
||||
|
||||
// mark skipped messages
|
||||
for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
|
||||
quo := int((skippedIndex - baseIndex) / 256)
|
||||
rem := int((skippedIndex - baseIndex) % 256)
|
||||
for len(skippedBitmap) <= quo {
|
||||
bitmap := big.NewInt(0)
|
||||
skippedBitmap = append(skippedBitmap, bitmap)
|
||||
}
|
||||
skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
|
||||
}
|
||||
|
||||
// process included message
|
||||
quo := int((currentIndex - baseIndex) / 256)
|
||||
for len(skippedBitmap) <= quo {
|
||||
bitmap := big.NewInt(0)
|
||||
skippedBitmap = append(skippedBitmap, bitmap)
|
||||
}
|
||||
|
||||
nextIndex = currentIndex + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// compute data hash
|
||||
dataHash := crypto.Keccak256Hash(dataBytes)
|
||||
|
||||
// compute skipped bitmap
|
||||
bitmapBytes := make([]byte, len(skippedBitmap)*32)
|
||||
for ii, num := range skippedBitmap {
|
||||
bytes := num.Bytes()
|
||||
padding := 32 - len(bytes)
|
||||
copy(bitmapBytes[32*ii+padding:], bytes)
|
||||
}
|
||||
|
||||
return &BatchHeader{
|
||||
version: version,
|
||||
batchIndex: batchIndex,
|
||||
l1MessagePopped: nextIndex - totalL1MessagePoppedBefore,
|
||||
totalL1MessagePopped: nextIndex,
|
||||
dataHash: dataHash,
|
||||
parentBatchHash: parentBatchHash,
|
||||
skippedL1MessageBitmap: bitmapBytes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Version returns the version of the BatchHeader.
|
||||
func (b *BatchHeader) Version() uint8 {
|
||||
return b.version
|
||||
}
|
||||
|
||||
// BatchIndex returns the batch index of the BatchHeader.
|
||||
func (b *BatchHeader) BatchIndex() uint64 {
|
||||
return b.batchIndex
|
||||
}
|
||||
|
||||
// TotalL1MessagePopped returns the total number of L1 messages popped in the BatchHeader.
|
||||
func (b *BatchHeader) TotalL1MessagePopped() uint64 {
|
||||
return b.totalL1MessagePopped
|
||||
}
|
||||
|
||||
// SkippedL1MessageBitmap returns the skipped L1 message bitmap in the BatchHeader.
|
||||
func (b *BatchHeader) SkippedL1MessageBitmap() []byte {
|
||||
return b.skippedL1MessageBitmap
|
||||
}
|
||||
|
||||
// Encode encodes the BatchHeader into RollupV2 BatchHeaderV0Codec Encoding.
|
||||
func (b *BatchHeader) Encode() []byte {
|
||||
batchBytes := make([]byte, 89+len(b.skippedL1MessageBitmap))
|
||||
batchBytes[0] = b.version
|
||||
binary.BigEndian.PutUint64(batchBytes[1:], b.batchIndex)
|
||||
binary.BigEndian.PutUint64(batchBytes[9:], b.l1MessagePopped)
|
||||
binary.BigEndian.PutUint64(batchBytes[17:], b.totalL1MessagePopped)
|
||||
copy(batchBytes[25:], b.dataHash[:])
|
||||
copy(batchBytes[57:], b.parentBatchHash[:])
|
||||
copy(batchBytes[89:], b.skippedL1MessageBitmap[:])
|
||||
return batchBytes
|
||||
}
|
||||
|
||||
// Hash calculates the hash of the batch header.
|
||||
func (b *BatchHeader) Hash() common.Hash {
|
||||
return crypto.Keccak256Hash(b.Encode())
|
||||
}
|
||||
|
||||
// DecodeBatchHeader attempts to decode the given byte slice into a BatchHeader.
|
||||
func DecodeBatchHeader(data []byte) (*BatchHeader, error) {
|
||||
if len(data) < 89 {
|
||||
return nil, fmt.Errorf("insufficient data for BatchHeader")
|
||||
}
|
||||
b := &BatchHeader{
|
||||
version: data[0],
|
||||
batchIndex: binary.BigEndian.Uint64(data[1:9]),
|
||||
l1MessagePopped: binary.BigEndian.Uint64(data[9:17]),
|
||||
totalL1MessagePopped: binary.BigEndian.Uint64(data[17:25]),
|
||||
dataHash: common.BytesToHash(data[25:57]),
|
||||
parentBatchHash: common.BytesToHash(data[57:89]),
|
||||
skippedL1MessageBitmap: data[89:],
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
@@ -1,251 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewBatchHeader(t *testing.T) {
|
||||
// Without L1 Msg
|
||||
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
|
||||
chunk := &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock,
|
||||
},
|
||||
}
|
||||
parentBatchHeader := &BatchHeader{
|
||||
version: 1,
|
||||
batchIndex: 0,
|
||||
l1MessagePopped: 0,
|
||||
totalL1MessagePopped: 0,
|
||||
dataHash: common.HexToHash("0x0"),
|
||||
parentBatchHash: common.HexToHash("0x0"),
|
||||
skippedL1MessageBitmap: nil,
|
||||
}
|
||||
batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
assert.Equal(t, 0, len(batchHeader.skippedL1MessageBitmap))
|
||||
|
||||
// 1 L1 Msg in 1 bitmap
|
||||
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock2 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock2,
|
||||
},
|
||||
}
|
||||
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
|
||||
expectedBitmap := "00000000000000000000000000000000000000000000000000000000000003ff" // skip first 10
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
|
||||
|
||||
// many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs
|
||||
templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_05.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock3 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace3, wrappedBlock3))
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock3,
|
||||
},
|
||||
}
|
||||
batchHeader, err = NewBatchHeader(1, 1, 37, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
assert.Equal(t, uint64(5), batchHeader.l1MessagePopped)
|
||||
assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
|
||||
expectedBitmap = "0000000000000000000000000000000000000000000000000000000000000000" // all bits are included, so none are skipped
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
|
||||
|
||||
// many consecutive L1 Msgs in 1 bitmap, with leading skipped msgs
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock3,
|
||||
},
|
||||
}
|
||||
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
assert.Equal(t, uint64(42), batchHeader.l1MessagePopped)
|
||||
assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
|
||||
expectedBitmap = "0000000000000000000000000000000000000000000000000000001fffffffff" // skipped the first 37 messages
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
|
||||
|
||||
// many sparse L1 Msgs in 1 bitmap
|
||||
templateBlockTrace4, err := os.ReadFile("../testdata/blockTrace_06.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock4 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace4, wrappedBlock4))
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock4,
|
||||
},
|
||||
}
|
||||
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
assert.Equal(t, uint64(10), batchHeader.l1MessagePopped)
|
||||
assert.Equal(t, 32, len(batchHeader.skippedL1MessageBitmap))
|
||||
expectedBitmap = "00000000000000000000000000000000000000000000000000000000000001dd" // 0111011101
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
|
||||
|
||||
// many L1 Msgs in each of 2 bitmaps
|
||||
templateBlockTrace5, err := os.ReadFile("../testdata/blockTrace_07.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock5 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace5, wrappedBlock5))
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock5,
|
||||
},
|
||||
}
|
||||
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
assert.Equal(t, uint64(257), batchHeader.l1MessagePopped)
|
||||
assert.Equal(t, 64, len(batchHeader.skippedL1MessageBitmap))
|
||||
expectedBitmap = "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd0000000000000000000000000000000000000000000000000000000000000000"
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(batchHeader.skippedL1MessageBitmap))
|
||||
}
|
||||
|
||||
func TestBatchHeaderEncode(t *testing.T) {
|
||||
// Without L1 Msg
|
||||
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
|
||||
chunk := &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock,
|
||||
},
|
||||
}
|
||||
parentBatchHeader := &BatchHeader{
|
||||
version: 1,
|
||||
batchIndex: 0,
|
||||
l1MessagePopped: 0,
|
||||
totalL1MessagePopped: 0,
|
||||
dataHash: common.HexToHash("0x0"),
|
||||
parentBatchHash: common.HexToHash("0x0"),
|
||||
skippedL1MessageBitmap: nil,
|
||||
}
|
||||
batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
bytes := batchHeader.Encode()
|
||||
assert.Equal(t, 89, len(bytes))
|
||||
assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
|
||||
|
||||
// With L1 Msg
|
||||
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock2 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock2,
|
||||
},
|
||||
}
|
||||
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
bytes = batchHeader.Encode()
|
||||
assert.Equal(t, 121, len(bytes))
|
||||
assert.Equal(t, "010000000000000001000000000000000b000000000000000b34f419ce7e882295bdb5aec6cce56ffa788a5fed4744d7fbd77e4acbf409f1ca4136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
|
||||
}
|
||||
|
||||
func TestBatchHeaderHash(t *testing.T) {
|
||||
// Without L1 Msg
|
||||
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
|
||||
chunk := &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock,
|
||||
},
|
||||
}
|
||||
parentBatchHeader := &BatchHeader{
|
||||
version: 1,
|
||||
batchIndex: 0,
|
||||
l1MessagePopped: 0,
|
||||
totalL1MessagePopped: 0,
|
||||
dataHash: common.HexToHash("0x0"),
|
||||
parentBatchHash: common.HexToHash("0x0"),
|
||||
skippedL1MessageBitmap: nil,
|
||||
}
|
||||
batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
hash := batchHeader.Hash()
|
||||
assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes()))
|
||||
|
||||
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock2 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock2))
|
||||
chunk2 := &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock2,
|
||||
},
|
||||
}
|
||||
batchHeader2, err := NewBatchHeader(1, 2, 0, batchHeader.Hash(), []*Chunk{chunk2})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader2)
|
||||
hash2 := batchHeader2.Hash()
|
||||
assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
|
||||
|
||||
// With L1 Msg
|
||||
templateBlockTrace3, err := os.ReadFile("../testdata/blockTrace_04.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock3 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace3, wrappedBlock3))
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock3,
|
||||
},
|
||||
}
|
||||
batchHeader, err = NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batchHeader)
|
||||
hash = batchHeader.Hash()
|
||||
assert.Equal(t, "1c3007880f0eafe74572ede7d164ff1ee5376e9ac9bff6f7fb837b2630cddc9a", common.Bytes2Hex(hash.Bytes()))
|
||||
}
|
||||
|
||||
func TestBatchHeaderDecode(t *testing.T) {
|
||||
header := &BatchHeader{
|
||||
version: 1,
|
||||
batchIndex: 10,
|
||||
l1MessagePopped: 20,
|
||||
totalL1MessagePopped: 30,
|
||||
dataHash: common.HexToHash("0x01"),
|
||||
parentBatchHash: common.HexToHash("0x02"),
|
||||
skippedL1MessageBitmap: []byte{0x01, 0x02, 0x03},
|
||||
}
|
||||
|
||||
encoded := header.Encode()
|
||||
decoded, err := DecodeBatchHeader(encoded)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, header, decoded)
|
||||
}
|
||||
@@ -1,193 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
|
||||
const CalldataNonZeroByteGas = 16
|
||||
|
||||
// GetKeccak256Gas calculates the gas cost for computing the keccak256 hash of a given size.
|
||||
func GetKeccak256Gas(size uint64) uint64 {
|
||||
return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
|
||||
}
|
||||
|
||||
// GetMemoryExpansionCost calculates the cost of memory expansion for a given memoryByteSize.
|
||||
func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
|
||||
memorySizeWord := (memoryByteSize + 31) / 32
|
||||
memoryCost := (memorySizeWord*memorySizeWord)/512 + (3 * memorySizeWord)
|
||||
return memoryCost
|
||||
}
|
||||
|
||||
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
|
||||
type WrappedBlock struct {
|
||||
Header *types.Header `json:"header"`
|
||||
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
|
||||
Transactions []*types.TransactionData `json:"transactions"`
|
||||
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
|
||||
RowConsumption *types.RowConsumption `json:"row_consumption"`
|
||||
txPayloadLengthCache map[string]uint64
|
||||
}
|
||||
|
||||
// NumL1Messages returns the number of L1 messages in this block.
|
||||
// This number is the sum of included and skipped L1 messages.
|
||||
func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
|
||||
var lastQueueIndex *uint64
|
||||
for _, txData := range w.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
lastQueueIndex = &txData.Nonce
|
||||
}
|
||||
}
|
||||
if lastQueueIndex == nil {
|
||||
return 0
|
||||
}
|
||||
// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
|
||||
// TODO: cache results
|
||||
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
|
||||
}
|
||||
|
||||
// NumL2Transactions returns the number of L2 transactions in this block.
|
||||
func (w *WrappedBlock) NumL2Transactions() uint64 {
|
||||
var count uint64
|
||||
for _, txData := range w.Transactions {
|
||||
if txData.Type != types.L1MessageTxType {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
|
||||
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
|
||||
bytes := make([]byte, 60)
|
||||
|
||||
if !w.Header.Number.IsUint64() {
|
||||
return nil, errors.New("block number is not uint64")
|
||||
}
|
||||
|
||||
// note: numL1Messages includes skipped messages
|
||||
numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
|
||||
if numL1Messages > math.MaxUint16 {
|
||||
return nil, errors.New("number of L1 messages exceeds max uint16")
|
||||
}
|
||||
|
||||
// note: numTransactions includes skipped messages
|
||||
numL2Transactions := w.NumL2Transactions()
|
||||
numTransactions := numL1Messages + numL2Transactions
|
||||
if numTransactions > math.MaxUint16 {
|
||||
return nil, errors.New("number of transactions exceeds max uint16")
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
|
||||
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
|
||||
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
|
||||
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
|
||||
binary.BigEndian.PutUint16(bytes[56:], uint16(numTransactions))
|
||||
binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
|
||||
|
||||
return bytes, nil
|
||||
}
|
||||
|
||||
// EstimateL1CommitCalldataSize calculates the calldata size in l1 commit approximately.
|
||||
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
|
||||
// This needs to be adjusted in the future.
|
||||
func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
|
||||
var size uint64
|
||||
for _, txData := range w.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
continue
|
||||
}
|
||||
size += 4 // 4 bytes payload length
|
||||
size += w.getTxPayloadLength(txData)
|
||||
}
|
||||
size += 60 // 60 bytes BlockContext
|
||||
return size
|
||||
}
|
||||
|
||||
// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
|
||||
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
|
||||
var total uint64
|
||||
var numL1Messages uint64
|
||||
for _, txData := range w.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
numL1Messages++
|
||||
continue
|
||||
}
|
||||
|
||||
txPayloadLength := w.getTxPayloadLength(txData)
|
||||
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
|
||||
total += CalldataNonZeroByteGas * 4 // 4 bytes payload length
|
||||
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
|
||||
}
|
||||
|
||||
// 60 bytes BlockContext calldata
|
||||
total += CalldataNonZeroByteGas * 60
|
||||
|
||||
// sload
|
||||
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
|
||||
|
||||
// staticcall
|
||||
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
|
||||
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
|
||||
|
||||
total += GetMemoryExpansionCost(36) * numL1Messages // staticcall to proxy
|
||||
total += 100 * numL1Messages // read admin in proxy
|
||||
total += 100 * numL1Messages // read impl in proxy
|
||||
total += 100 * numL1Messages // access impl
|
||||
total += GetMemoryExpansionCost(36) * numL1Messages // delegatecall to impl
|
||||
|
||||
return total
|
||||
}
|
||||
|
||||
func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
|
||||
if w.txPayloadLengthCache == nil {
|
||||
w.txPayloadLengthCache = make(map[string]uint64)
|
||||
}
|
||||
|
||||
if length, exists := w.txPayloadLengthCache[txData.TxHash]; exists {
|
||||
return length
|
||||
}
|
||||
|
||||
rlpTxData, err := convertTxDataToRLPEncoding(txData)
|
||||
if err != nil {
|
||||
log.Crit("convertTxDataToRLPEncoding failed, which should not happen", "hash", txData.TxHash, "err", err)
|
||||
return 0
|
||||
}
|
||||
txPayloadLength := uint64(len(rlpTxData))
|
||||
w.txPayloadLengthCache[txData.TxHash] = txPayloadLength
|
||||
return txPayloadLength
|
||||
}
|
||||
|
||||
func convertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
|
||||
data, err := hexutil.Decode(txData.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode txData.Data: %s, err: %w", txData.Data, err)
|
||||
}
|
||||
|
||||
tx := types.NewTx(&types.LegacyTx{
|
||||
Nonce: txData.Nonce,
|
||||
To: txData.To,
|
||||
Value: txData.Value.ToInt(),
|
||||
Gas: txData.Gas,
|
||||
GasPrice: txData.GasPrice.ToInt(),
|
||||
Data: data,
|
||||
V: txData.V.ToInt(),
|
||||
R: txData.R.ToInt(),
|
||||
S: txData.S.ToInt(),
|
||||
})
|
||||
|
||||
rlpTxData, err := tx.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal binary of the tx: %+v, err: %w", tx, err)
|
||||
}
|
||||
|
||||
return rlpTxData, nil
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// Chunk contains blocks to be encoded
|
||||
type Chunk struct {
|
||||
Blocks []*WrappedBlock `json:"blocks"`
|
||||
}
|
||||
|
||||
// NumL1Messages returns the number of L1 messages in this chunk.
|
||||
// This number is the sum of included and skipped L1 messages.
|
||||
func (c *Chunk) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
|
||||
var numL1Messages uint64
|
||||
for _, block := range c.Blocks {
|
||||
numL1MessagesInBlock := block.NumL1Messages(totalL1MessagePoppedBefore)
|
||||
numL1Messages += numL1MessagesInBlock
|
||||
totalL1MessagePoppedBefore += numL1MessagesInBlock
|
||||
}
|
||||
// TODO: cache results
|
||||
return numL1Messages
|
||||
}
|
||||
|
||||
// Encode encodes the Chunk into RollupV2 Chunk Encoding.
|
||||
func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
|
||||
numBlocks := len(c.Blocks)
|
||||
|
||||
if numBlocks > 255 {
|
||||
return nil, errors.New("number of blocks exceeds 1 byte")
|
||||
}
|
||||
if numBlocks == 0 {
|
||||
return nil, errors.New("number of blocks is 0")
|
||||
}
|
||||
|
||||
var chunkBytes []byte
|
||||
chunkBytes = append(chunkBytes, byte(numBlocks))
|
||||
|
||||
var l2TxDataBytes []byte
|
||||
|
||||
for _, block := range c.Blocks {
|
||||
blockBytes, err := block.Encode(totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode block: %v", err)
|
||||
}
|
||||
totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
|
||||
|
||||
if len(blockBytes) != 60 {
|
||||
return nil, fmt.Errorf("block encoding is not 60 bytes long %x", len(blockBytes))
|
||||
}
|
||||
|
||||
chunkBytes = append(chunkBytes, blockBytes...)
|
||||
|
||||
// Append rlp-encoded l2Txs
|
||||
for _, txData := range block.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
continue
|
||||
}
|
||||
rlpTxData, err := convertTxDataToRLPEncoding(txData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var txLen [4]byte
|
||||
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
|
||||
l2TxDataBytes = append(l2TxDataBytes, txLen[:]...)
|
||||
l2TxDataBytes = append(l2TxDataBytes, rlpTxData...)
|
||||
}
|
||||
}
|
||||
|
||||
chunkBytes = append(chunkBytes, l2TxDataBytes...)
|
||||
|
||||
return chunkBytes, nil
|
||||
}
|
||||
|
||||
// Hash hashes the Chunk into RollupV2 Chunk Hash.
//
// Preimage: the first 58 bytes of each 60-byte BlockContext (the last two
// bytes — NumL1Messages — are dropped), followed per block by all L1 tx
// hashes and then all L2 tx hashes. The result is the keccak256 of that
// concatenation.
func (c *Chunk) Hash(totalL1MessagePoppedBefore uint64) (common.Hash, error) {
	chunkBytes, err := c.Encode(totalL1MessagePoppedBefore)
	if err != nil {
		return common.Hash{}, err
	}
	// Encode guarantees at least one block, so byte 0 is the block count
	numBlocks := chunkBytes[0]

	// concatenate block contexts
	var dataBytes []byte
	for i := 0; i < int(numBlocks); i++ {
		// only the first 58 bytes of each BlockContext are needed for the hashing process
		dataBytes = append(dataBytes, chunkBytes[1+60*i:60*i+59]...)
	}

	// concatenate l1 and l2 tx hashes; within a block, L1 hashes come first
	for _, block := range c.Blocks {
		var l1TxHashes []byte
		var l2TxHashes []byte
		for _, txData := range block.Transactions {
			// TxHash is a hex string, optionally 0x-prefixed
			txHash := strings.TrimPrefix(txData.TxHash, "0x")
			hashBytes, err := hex.DecodeString(txHash)
			if err != nil {
				return common.Hash{}, err
			}
			if txData.Type == types.L1MessageTxType {
				l1TxHashes = append(l1TxHashes, hashBytes...)
			} else {
				l2TxHashes = append(l2TxHashes, hashBytes...)
			}
		}
		dataBytes = append(dataBytes, l1TxHashes...)
		dataBytes = append(dataBytes, l2TxHashes...)
	}

	hash := crypto.Keccak256Hash(dataBytes)
	return hash, nil
}
|
||||
|
||||
// EstimateL1CommitGas calculates the total L1 commit gas for this chunk approximately
|
||||
func (c *Chunk) EstimateL1CommitGas() uint64 {
|
||||
var totalTxNum uint64
|
||||
var totalL1CommitGas uint64
|
||||
for _, block := range c.Blocks {
|
||||
totalTxNum += uint64(len(block.Transactions))
|
||||
totalL1CommitGas += block.EstimateL1CommitGas()
|
||||
}
|
||||
|
||||
numBlocks := uint64(len(c.Blocks))
|
||||
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
|
||||
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
|
||||
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
|
||||
|
||||
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
|
||||
return totalL1CommitGas
|
||||
}
|
||||
@@ -1,226 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestChunkEncode covers Chunk.Encode error paths (empty chunk, >255 blocks)
// and pins golden encodings for chunks built from the blockTrace_02/04
// testdata fixtures, including a block carrying L1 message transactions.
func TestChunkEncode(t *testing.T) {
	// Test case 1: when the chunk contains no blocks.
	chunk := &Chunk{
		Blocks: []*WrappedBlock{},
	}
	bytes, err := chunk.Encode(0)
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of blocks is 0")

	// Test case 2: when the chunk contains more than 255 blocks.
	chunk = &Chunk{
		Blocks: []*WrappedBlock{},
	}
	for i := 0; i < 256; i++ {
		chunk.Blocks = append(chunk.Blocks, &WrappedBlock{})
	}
	bytes, err = chunk.Encode(0)
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")

	// Test case 3: when the chunk contains one block.
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
	assert.Equal(t, uint64(298), wrappedBlock.EstimateL1CommitCalldataSize())
	assert.Equal(t, uint64(2), wrappedBlock.NumL2Transactions())
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
		},
	}
	assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
	assert.Equal(t, uint64(6042), chunk.EstimateL1CommitGas())
	bytes, err = chunk.Encode(0)
	hexString := hex.EncodeToString(bytes)
	assert.NoError(t, err)
	// 1-byte block count + 60-byte BlockContext + length-prefixed RLP txs
	assert.Equal(t, 299, len(bytes))
	assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString)

	// Test case 4: when the chunk contains one block with 1 L1MsgTx
	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
	assert.NoError(t, err)

	wrappedBlock2 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
	assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
	assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
	assert.Equal(t, uint64(1), wrappedBlock2.NumL2Transactions())
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock2,
		},
	}
	assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
	assert.Equal(t, uint64(5329), chunk.EstimateL1CommitGas())
	bytes, err = chunk.Encode(0)
	hexString = hex.EncodeToString(bytes)
	assert.NoError(t, err)
	assert.Equal(t, 97, len(bytes))
	assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)

	// Test case 5: when the chunk contains two blocks each with 1 L1MsgTx
	// TODO: revise this test, we cannot reuse the same L1MsgTx twice
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock2,
			wrappedBlock2,
		},
	}
	assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
	assert.Equal(t, uint64(10612), chunk.EstimateL1CommitGas())
	bytes, err = chunk.Encode(0)
	hexString = hex.EncodeToString(bytes)
	assert.NoError(t, err)
	assert.Equal(t, 193, len(bytes))
	assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000001000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
}
|
||||
|
||||
// TestChunkHash pins golden Chunk.Hash values for chunks built from the
// blockTrace_02/03/04 testdata fixtures, plus the empty-chunk error path.
func TestChunkHash(t *testing.T) {
	// Test case 1: when the chunk contains no blocks
	chunk := &Chunk{
		Blocks: []*WrappedBlock{},
	}
	hash, err := chunk.Hash(0)
	assert.Error(t, err)
	// on error the zero hash is returned
	assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000000", hash.Hex())
	assert.Contains(t, err.Error(), "number of blocks is 0")

	// Test case 2: successfully hashing a chunk on one block
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)
	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
		},
	}
	hash, err = chunk.Hash(0)
	assert.NoError(t, err)
	assert.Equal(t, "0x78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hash.Hex())

	// Test case 3: successfully hashing a chunk on two blocks
	templateBlockTrace1, err := os.ReadFile("../testdata/blockTrace_03.json")
	assert.NoError(t, err)
	wrappedBlock1 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1))
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
			wrappedBlock1,
		},
	}
	hash, err = chunk.Hash(0)
	assert.NoError(t, err)
	assert.Equal(t, "0xaa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hash.Hex())

	// Test case 4: successfully hashing a chunk on two blocks each with L1 and L2 txs
	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
	assert.NoError(t, err)
	wrappedBlock2 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock2,
			wrappedBlock2,
		},
	}
	hash, err = chunk.Hash(0)
	assert.NoError(t, err)
	assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
}
|
||||
|
||||
// TestErrorPaths drives WrappedBlock.Encode, Chunk.Encode and Chunk.Hash
// through their failure branches: oversized block numbers, too many
// transactions / L1 messages, and malformed hex in tx data and tx hashes.
func TestErrorPaths(t *testing.T) {
	// test 1: Header.Number is not a uint64
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}

	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	// shift the block number past 2^64 so IsUint64 fails
	wrappedBlock.Header.Number = wrappedBlock.Header.Number.Lsh(wrappedBlock.Header.Number, 64)
	bytes, err := wrappedBlock.Encode(0)
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "block number is not uint64")

	// test 2: more than 65535 transactions in one block
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	for i := 0; i < 65537; i++ {
		wrappedBlock.Transactions = append(wrappedBlock.Transactions, wrappedBlock.Transactions[0])
	}

	bytes, err = wrappedBlock.Encode(0)
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")

	// the same failure must surface through Chunk.Encode
	chunk := &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
		},
	}

	bytes, err = chunk.Encode(0)
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")

	// test 3: malformed tx calldata hex
	wrappedBlock.Transactions = wrappedBlock.Transactions[:1]
	wrappedBlock.Transactions[0].Data = "not-a-hex"
	bytes, err = chunk.Encode(0)
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "hex string without 0x prefix")

	// test 4: malformed tx hash hex breaks Chunk.Hash
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	wrappedBlock.Transactions[0].TxHash = "not-a-hex"
	_, err = chunk.Hash(0)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "invalid byte")

	// test 5: more than 65535 L1 messages in one block
	templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
	assert.NoError(t, err)

	wrappedBlock2 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
	for i := 0; i < 65535; i++ {
		tx := &wrappedBlock2.Transactions[i]
		txCopy := *tx
		txCopy.Nonce = uint64(i + 1)
		wrappedBlock2.Transactions = append(wrappedBlock2.Transactions, txCopy)
	}

	bytes, err = wrappedBlock2.Encode(0)
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")

	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock2,
		},
	}

	bytes, err = chunk.Encode(0)
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")

}
|
||||
@@ -21,8 +21,8 @@ const (
|
||||
// GasOracleImported represents the gas oracle status is imported
|
||||
GasOracleImported
|
||||
|
||||
// GasOracleFailed represents the gas oracle status is failed
|
||||
GasOracleFailed
|
||||
// GasOracleImportedFailed represents the gas oracle status is imported failed
|
||||
GasOracleImportedFailed
|
||||
)
|
||||
|
||||
func (s GasOracleStatus) String() string {
|
||||
@@ -35,10 +35,10 @@ func (s GasOracleStatus) String() string {
|
||||
return "GasOracleImporting"
|
||||
case GasOracleImported:
|
||||
return "GasOracleImported"
|
||||
case GasOracleFailed:
|
||||
return "GasOracleFailed"
|
||||
case GasOracleImportedFailed:
|
||||
return "GasOracleImportedFailed"
|
||||
default:
|
||||
return fmt.Sprintf("Undefined (%d)", int32(s))
|
||||
return fmt.Sprintf("Undefined GasOracleStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -159,7 +159,7 @@ func (ps ProvingStatus) String() string {
|
||||
case ProvingTaskFailed:
|
||||
return "failed"
|
||||
default:
|
||||
return fmt.Sprintf("Undefined (%d)", int32(ps))
|
||||
return fmt.Sprintf("Undefined ProvingStatus (%d)", int32(ps))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -184,7 +184,7 @@ func (s ChunkProofsStatus) String() string {
|
||||
case ChunkProofsStatusReady:
|
||||
return "ChunkProofsStatusReady"
|
||||
default:
|
||||
return fmt.Sprintf("Undefined (%d)", int32(s))
|
||||
return fmt.Sprintf("Undefined ChunkProofsStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,6 +227,69 @@ func (s RollupStatus) String() string {
|
||||
case RollupFinalizeFailed:
|
||||
return "RollupFinalizeFailed"
|
||||
default:
|
||||
return fmt.Sprintf("Undefined (%d)", int32(s))
|
||||
return fmt.Sprintf("Undefined RollupStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
// SenderType defines the various types of senders sending the transactions.
type SenderType int

const (
	// SenderTypeUnknown indicates an unknown sender type.
	SenderTypeUnknown SenderType = iota
	// SenderTypeCommitBatch indicates the sender is responsible for committing batches.
	SenderTypeCommitBatch
	// SenderTypeFinalizeBatch indicates the sender is responsible for finalizing batches.
	SenderTypeFinalizeBatch
	// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
	SenderTypeL1GasOracle
	// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
	SenderTypeL2GasOracle
)

// String returns a string representation of the SenderType.
// Note: SenderTypeUnknown has no case and is rendered by the default branch
// as "Unknown SenderType (0)".
func (t SenderType) String() string {
	switch t {
	case SenderTypeCommitBatch:
		return "SenderTypeCommitBatch"
	case SenderTypeFinalizeBatch:
		return "SenderTypeFinalizeBatch"
	case SenderTypeL1GasOracle:
		return "SenderTypeL1GasOracle"
	case SenderTypeL2GasOracle:
		return "SenderTypeL2GasOracle"
	default:
		return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
	}
}
|
||||
|
||||
// TxStatus represents the current status of a transaction in the transaction lifecycle.
type TxStatus int

const (
	// TxStatusUnknown represents an undefined status of the transaction.
	TxStatusUnknown TxStatus = iota
	// TxStatusPending indicates that the transaction is yet to be processed.
	TxStatusPending
	// TxStatusReplaced indicates that the transaction has been replaced by another one, typically due to a higher gas price.
	TxStatusReplaced
	// TxStatusConfirmed indicates that the transaction has been successfully processed and confirmed.
	TxStatusConfirmed
	// TxStatusConfirmedFailed indicates that the transaction has failed during processing.
	TxStatusConfirmedFailed
)

// String returns a string representation of the TxStatus.
// TxStatusUnknown (and any out-of-range value) is rendered by the default
// branch as "Unknown TxStatus (n)".
func (s TxStatus) String() string {
	switch s {
	case TxStatusPending:
		return "TxStatusPending"
	case TxStatusReplaced:
		return "TxStatusReplaced"
	case TxStatusConfirmed:
		return "TxStatusConfirmed"
	case TxStatusConfirmedFailed:
		return "TxStatusConfirmedFailed"
	default:
		return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
	}
}
|
||||
|
||||
@@ -75,7 +75,7 @@ func TestProvingStatus(t *testing.T) {
|
||||
{
|
||||
"Undefined",
|
||||
ProvingStatus(999), // Invalid value.
|
||||
"Undefined (999)",
|
||||
"Undefined ProvingStatus (999)",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -85,3 +85,243 @@ func TestProvingStatus(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestRollupStatus verifies RollupStatus.String for every defined value
// and for an out-of-range value (999).
func TestRollupStatus(t *testing.T) {
	tests := []struct {
		name string
		s    RollupStatus
		want string
	}{
		{
			"RollupUndefined",
			RollupUndefined,
			"Undefined RollupStatus (0)",
		},
		{
			"RollupPending",
			RollupPending,
			"RollupPending",
		},
		{
			"RollupCommitting",
			RollupCommitting,
			"RollupCommitting",
		},
		{
			"RollupCommitted",
			RollupCommitted,
			"RollupCommitted",
		},
		{
			"RollupFinalizing",
			RollupFinalizing,
			"RollupFinalizing",
		},
		{
			"RollupFinalized",
			RollupFinalized,
			"RollupFinalized",
		},
		{
			"RollupCommitFailed",
			RollupCommitFailed,
			"RollupCommitFailed",
		},
		{
			"RollupFinalizeFailed",
			RollupFinalizeFailed,
			"RollupFinalizeFailed",
		},
		{
			"Invalid Value",
			RollupStatus(999),
			"Undefined RollupStatus (999)",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, tt.s.String())
		})
	}
}
|
||||
|
||||
// TestSenderType verifies SenderType.String for every defined value
// and for an out-of-range value (999).
func TestSenderType(t *testing.T) {
	tests := []struct {
		name string
		t    SenderType
		want string
	}{
		{
			"SenderTypeUnknown",
			SenderTypeUnknown,
			"Unknown SenderType (0)",
		},
		{
			"SenderTypeCommitBatch",
			SenderTypeCommitBatch,
			"SenderTypeCommitBatch",
		},
		{
			"SenderTypeFinalizeBatch",
			SenderTypeFinalizeBatch,
			"SenderTypeFinalizeBatch",
		},
		{
			"SenderTypeL1GasOracle",
			SenderTypeL1GasOracle,
			"SenderTypeL1GasOracle",
		},
		{
			"SenderTypeL2GasOracle",
			SenderTypeL2GasOracle,
			"SenderTypeL2GasOracle",
		},
		{
			"Invalid Value",
			SenderType(999),
			"Unknown SenderType (999)",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, tt.t.String())
		})
	}
}
|
||||
|
||||
// TestTxStatus verifies TxStatus.String for every defined value
// and for an out-of-range value (999).
func TestTxStatus(t *testing.T) {
	tests := []struct {
		name string
		s    TxStatus
		want string
	}{
		{
			"TxStatusUnknown",
			TxStatusUnknown,
			"Unknown TxStatus (0)",
		},
		{
			"TxStatusPending",
			TxStatusPending,
			"TxStatusPending",
		},
		{
			"TxStatusReplaced",
			TxStatusReplaced,
			"TxStatusReplaced",
		},
		{
			"TxStatusConfirmed",
			TxStatusConfirmed,
			"TxStatusConfirmed",
		},
		{
			"TxStatusConfirmedFailed",
			TxStatusConfirmedFailed,
			"TxStatusConfirmedFailed",
		},
		{
			"Invalid Value",
			TxStatus(999),
			"Unknown TxStatus (999)",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, tt.s.String())
		})
	}
}
|
||||
|
||||
// TestGasOracleStatus verifies GasOracleStatus.String for the defined values
// and for an out-of-range value (999).
func TestGasOracleStatus(t *testing.T) {
	tests := []struct {
		name string
		s    GasOracleStatus
		want string
	}{
		{
			"GasOracleUndefined",
			GasOracleUndefined,
			"GasOracleUndefined",
		},
		{
			"GasOraclePending",
			GasOraclePending,
			"GasOraclePending",
		},
		{
			"GasOracleImporting",
			GasOracleImporting,
			"GasOracleImporting",
		},
		{
			"GasOracleImported",
			GasOracleImported,
			"GasOracleImported",
		},
		{
			"GasOracleImportedFailed",
			GasOracleImportedFailed,
			"GasOracleImportedFailed",
		},
		{
			"Invalid Value",
			GasOracleStatus(999),
			"Undefined GasOracleStatus (999)",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, tt.s.String())
		})
	}
}
|
||||
|
||||
// TestProverTaskFailureType verifies ProverTaskFailureType.String for every
// defined value and for an out-of-range value (999).
func TestProverTaskFailureType(t *testing.T) {
	tests := []struct {
		name string
		r    ProverTaskFailureType
		want string
	}{
		{
			"ProverTaskFailureTypeUndefined",
			ProverTaskFailureTypeUndefined,
			"prover task failure undefined",
		},
		{
			"ProverTaskFailureTypeTimeout",
			ProverTaskFailureTypeTimeout,
			"prover task failure timeout",
		},
		{
			"ProverTaskFailureTypeSubmitStatusNotOk",
			ProverTaskFailureTypeSubmitStatusNotOk,
			"prover task failure validated submit proof status not ok",
		},
		{
			"ProverTaskFailureTypeVerifiedFailed",
			ProverTaskFailureTypeVerifiedFailed,
			"prover task failure verified failed",
		},
		{
			"ProverTaskFailureTypeServerError",
			ProverTaskFailureTypeServerError,
			"prover task failure server exception",
		},
		{
			"Invalid Value",
			ProverTaskFailureType(999),
			"illegal prover task failure type (999)",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, tt.r.String())
		})
	}
}
|
||||
|
||||
64
common/types/encoding/bitmap.go
Normal file
64
common/types/encoding/bitmap.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package encoding
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// ConstructSkippedBitmap constructs skipped L1 message bitmap of the batch.
//
// It walks every L1 message transaction in the batch (in chunk/block order),
// treating tx.Nonce as the message queue index. Any gap between the next
// expected index and the index actually included marks those messages as
// skipped. Returns the bitmap bytes (32 bytes per 256-message window), the
// queue index following the last processed message, and an error if queue
// indices ever go backwards.
func ConstructSkippedBitmap(batchIndex uint64, chunks []*Chunk, totalL1MessagePoppedBefore uint64) ([]byte, uint64, error) {
	// skipped L1 message bitmap, an array of 256-bit bitmaps
	var skippedBitmap []*big.Int

	// the first queue index that belongs to this batch
	baseIndex := totalL1MessagePoppedBefore

	// the next queue index that we need to process
	nextIndex := totalL1MessagePoppedBefore

	for chunkID, chunk := range chunks {
		for blockID, block := range chunk.Blocks {
			for _, tx := range block.Transactions {
				if tx.Type != types.L1MessageTxType {
					continue
				}
				// for L1 message txs, Nonce carries the message queue index
				currentIndex := tx.Nonce

				// queue indices must be non-decreasing across the batch
				if currentIndex < nextIndex {
					return nil, 0, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batchIndex, chunkID, blockID, block.Header.Hash(), tx.TxHash)
				}

				// mark skipped messages: set one bit per message in [nextIndex, currentIndex)
				for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
					quo := int((skippedIndex - baseIndex) / 256)
					rem := int((skippedIndex - baseIndex) % 256)
					// grow the bitmap list on demand, one 256-bit word at a time
					for len(skippedBitmap) <= quo {
						bitmap := big.NewInt(0)
						skippedBitmap = append(skippedBitmap, bitmap)
					}
					skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
				}

				// process included message: ensure its word exists but leave the bit 0
				quo := int((currentIndex - baseIndex) / 256)
				for len(skippedBitmap) <= quo {
					bitmap := big.NewInt(0)
					skippedBitmap = append(skippedBitmap, bitmap)
				}

				nextIndex = currentIndex + 1
			}
		}
	}

	// serialize each 256-bit word big-endian, left-padded to 32 bytes
	bitmapBytes := make([]byte, len(skippedBitmap)*32)
	for ii, num := range skippedBitmap {
		bytes := num.Bytes()
		padding := 32 - len(bytes)
		copy(bitmapBytes[32*ii+padding:], bytes)
	}

	return bitmapBytes, nextIndex, nil
}
|
||||
463
common/types/encoding/codecv0/codecv0.go
Normal file
463
common/types/encoding/codecv0/codecv0.go
Normal file
@@ -0,0 +1,463 @@
|
||||
package codecv0
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
|
||||
"scroll-tech/common/types/encoding"
|
||||
)
|
||||
|
||||
// CodecV0Version denotes the version of the codec.
const CodecV0Version = 0

// DABlock represents a Data Availability Block.
type DABlock struct {
	BlockNumber     uint64   // L2 block number (must fit in uint64)
	Timestamp       uint64   // block timestamp (Header.Time)
	BaseFee         *big.Int // block base fee; may be nil (encoded as zero)
	GasLimit        uint64   // block gas limit
	NumTransactions uint16   // total tx count, including skipped L1 messages
	NumL1Messages   uint16   // L1 message count, including skipped ones
}

// DAChunk groups consecutive DABlocks with their transactions.
type DAChunk struct {
	Blocks       []*DABlock
	// Transactions[i] holds the transactions of Blocks[i], in order.
	Transactions [][]*types.TransactionData
}

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
	Version                uint8       // codec version (CodecV0Version)
	BatchIndex             uint64      // index of this batch
	L1MessagePopped        uint64      // L1 messages popped within this batch
	TotalL1MessagePopped   uint64      // cumulative L1 messages popped after this batch
	DataHash               common.Hash // keccak256 over the chunk hashes
	ParentBatchHash        common.Hash // hash of the preceding batch
	SkippedL1MessageBitmap []byte      // 32-byte words marking skipped L1 messages
}
|
||||
|
||||
// NewDABlock creates a new DABlock from the given encoding.Block and the total number of L1 messages popped before.
// It fails if the block number does not fit a uint64, or if the L1 message
// count or total transaction count exceeds uint16.
func NewDABlock(block *encoding.Block, totalL1MessagePoppedBefore uint64) (*DABlock, error) {
	if !block.Header.Number.IsUint64() {
		return nil, errors.New("block number is not uint64")
	}

	// note: numL1Messages includes skipped messages
	numL1Messages := block.NumL1Messages(totalL1MessagePoppedBefore)
	if numL1Messages > math.MaxUint16 {
		return nil, errors.New("number of L1 messages exceeds max uint16")
	}

	// note: numTransactions includes skipped messages (via numL1Messages)
	numL2Transactions := block.NumL2Transactions()
	numTransactions := numL1Messages + numL2Transactions
	if numTransactions > math.MaxUint16 {
		return nil, errors.New("number of transactions exceeds max uint16")
	}

	daBlock := DABlock{
		BlockNumber:     block.Header.Number.Uint64(),
		Timestamp:       block.Header.Time,
		BaseFee:         block.Header.BaseFee,
		GasLimit:        block.Header.GasLimit,
		NumTransactions: uint16(numTransactions),
		NumL1Messages:   uint16(numL1Messages),
	}

	return &daBlock, nil
}
|
||||
|
||||
// Encode serializes the DABlock into a slice of bytes.
|
||||
func (b *DABlock) Encode() []byte {
|
||||
bytes := make([]byte, 60)
|
||||
binary.BigEndian.PutUint64(bytes[0:], b.BlockNumber)
|
||||
binary.BigEndian.PutUint64(bytes[8:], b.Timestamp)
|
||||
if b.BaseFee != nil {
|
||||
binary.BigEndian.PutUint64(bytes[40:], b.BaseFee.Uint64())
|
||||
}
|
||||
binary.BigEndian.PutUint64(bytes[48:], b.GasLimit)
|
||||
binary.BigEndian.PutUint16(bytes[56:], b.NumTransactions)
|
||||
binary.BigEndian.PutUint16(bytes[58:], b.NumL1Messages)
|
||||
return bytes
|
||||
}
|
||||
|
||||
// NewDAChunk creates a new DAChunk from the given encoding.Chunk and the total number of L1 messages popped before.
// The chunk must contain between 1 and 255 blocks (the block count is
// encoded in a single byte).
func NewDAChunk(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (*DAChunk, error) {
	var blocks []*DABlock
	var txs [][]*types.TransactionData

	if chunk == nil {
		return nil, errors.New("chunk is nil")
	}

	if len(chunk.Blocks) == 0 {
		return nil, errors.New("number of blocks is 0")
	}

	if len(chunk.Blocks) > 255 {
		return nil, errors.New("number of blocks exceeds 1 byte")
	}

	for _, block := range chunk.Blocks {
		b, err := NewDABlock(block, totalL1MessagePoppedBefore)
		if err != nil {
			return nil, err
		}
		blocks = append(blocks, b)
		// advance the popped counter so the next block sees the updated state
		totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
		txs = append(txs, block.Transactions)
	}

	daChunk := DAChunk{
		Blocks:       blocks,
		Transactions: txs,
	}

	return &daChunk, nil
}
|
||||
|
||||
// Encode serializes the DAChunk into a slice of bytes.
// Layout: [numBlocks: 1 byte][60-byte DABlock encodings][for every L2 tx:
// 4-byte big-endian length || RLP bytes]. L1 message txs are excluded.
func (c *DAChunk) Encode() ([]byte, error) {
	var chunkBytes []byte
	chunkBytes = append(chunkBytes, byte(len(c.Blocks)))

	var l2TxDataBytes []byte

	for _, block := range c.Blocks {
		chunkBytes = append(chunkBytes, block.Encode()...)
	}

	for _, blockTxs := range c.Transactions {
		for _, txData := range blockTxs {
			// L1 messages are available on L1; only L2 txs go into the payload
			if txData.Type == types.L1MessageTxType {
				continue
			}
			var txLen [4]byte
			rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(txData)
			if err != nil {
				return nil, err
			}
			binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
			l2TxDataBytes = append(l2TxDataBytes, txLen[:]...)
			l2TxDataBytes = append(l2TxDataBytes, rlpTxData...)
		}
	}

	chunkBytes = append(chunkBytes, l2TxDataBytes...)
	return chunkBytes, nil
}
|
||||
|
||||
// Hash computes the hash of the DAChunk data.
//
// Preimage: the first 58 bytes of every 60-byte block context (dropping the
// trailing NumL1Messages field), then, per block, all L1 tx hashes followed
// by all L2 tx hashes. Returns the keccak256 of that concatenation.
func (c *DAChunk) Hash() (common.Hash, error) {
	chunkBytes, err := c.Encode()
	if err != nil {
		return common.Hash{}, err
	}

	if len(chunkBytes) == 0 {
		return common.Hash{}, errors.New("chunk data is empty and cannot be processed")
	}
	numBlocks := chunkBytes[0]

	// concatenate block contexts
	var dataBytes []byte
	for i := 0; i < int(numBlocks); i++ {
		// only the first 58 bytes of each BlockContext are needed for the hashing process
		dataBytes = append(dataBytes, chunkBytes[1+60*i:60*i+59]...)
	}

	// concatenate l1 and l2 tx hashes; within a block, L1 hashes come first
	for _, blockTxs := range c.Transactions {
		var l1TxHashes []byte
		var l2TxHashes []byte
		for _, txData := range blockTxs {
			// TxHash is a hex string, optionally 0x-prefixed
			txHash := strings.TrimPrefix(txData.TxHash, "0x")
			hashBytes, err := hex.DecodeString(txHash)
			if err != nil {
				return common.Hash{}, fmt.Errorf("failed to decode tx hash from TransactionData: hash=%v, err=%w", txData.TxHash, err)
			}
			if txData.Type == types.L1MessageTxType {
				l1TxHashes = append(l1TxHashes, hashBytes...)
			} else {
				l2TxHashes = append(l2TxHashes, hashBytes...)
			}
		}
		dataBytes = append(dataBytes, l1TxHashes...)
		dataBytes = append(dataBytes, l2TxHashes...)
	}

	hash := crypto.Keccak256Hash(dataBytes)
	return hash, nil
}
|
||||
|
||||
// NewDABatch creates a DABatch from the provided encoding.Batch.
|
||||
func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
|
||||
// compute batch data hash
|
||||
var dataBytes []byte
|
||||
totalL1MessagePoppedBeforeChunk := batch.TotalL1MessagePoppedBefore
|
||||
|
||||
for _, chunk := range batch.Chunks {
|
||||
// build data hash
|
||||
daChunk, err := NewDAChunk(chunk, totalL1MessagePoppedBeforeChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
totalL1MessagePoppedBeforeChunk += chunk.NumL1Messages(totalL1MessagePoppedBeforeChunk)
|
||||
daChunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dataBytes = append(dataBytes, daChunkHash.Bytes()...)
|
||||
}
|
||||
|
||||
// compute data hash
|
||||
dataHash := crypto.Keccak256Hash(dataBytes)
|
||||
|
||||
// skipped L1 messages bitmap
|
||||
bitmapBytes, totalL1MessagePoppedAfter, err := encoding.ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
daBatch := DABatch{
|
||||
Version: CodecV0Version,
|
||||
BatchIndex: batch.Index,
|
||||
L1MessagePopped: totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore,
|
||||
TotalL1MessagePopped: totalL1MessagePoppedAfter,
|
||||
DataHash: dataHash,
|
||||
ParentBatchHash: batch.ParentBatchHash,
|
||||
SkippedL1MessageBitmap: bitmapBytes,
|
||||
}
|
||||
|
||||
return &daBatch, nil
|
||||
}
|
||||
|
||||
// NewDABatchFromBytes attempts to decode the given byte slice into a DABatch.
|
||||
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
|
||||
if len(data) < 89 {
|
||||
return nil, fmt.Errorf("insufficient data for DABatch, expected at least 89 bytes but got %d", len(data))
|
||||
}
|
||||
|
||||
b := &DABatch{
|
||||
Version: data[0],
|
||||
BatchIndex: binary.BigEndian.Uint64(data[1:9]),
|
||||
L1MessagePopped: binary.BigEndian.Uint64(data[9:17]),
|
||||
TotalL1MessagePopped: binary.BigEndian.Uint64(data[17:25]),
|
||||
DataHash: common.BytesToHash(data[25:57]),
|
||||
ParentBatchHash: common.BytesToHash(data[57:89]),
|
||||
SkippedL1MessageBitmap: data[89:],
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Encode serializes the DABatch into bytes.
|
||||
func (b *DABatch) Encode() []byte {
|
||||
batchBytes := make([]byte, 89+len(b.SkippedL1MessageBitmap))
|
||||
batchBytes[0] = b.Version
|
||||
binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex)
|
||||
binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped)
|
||||
binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped)
|
||||
copy(batchBytes[25:], b.DataHash[:])
|
||||
copy(batchBytes[57:], b.ParentBatchHash[:])
|
||||
copy(batchBytes[89:], b.SkippedL1MessageBitmap[:])
|
||||
return batchBytes
|
||||
}
|
||||
|
||||
// Hash computes the hash of the serialized DABatch.
|
||||
func (b *DABatch) Hash() common.Hash {
|
||||
bytes := b.Encode()
|
||||
return crypto.Keccak256Hash(bytes)
|
||||
}
|
||||
|
||||
// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
//
// NOTE(review): not implemented yet — it currently returns (nil, nil, nil),
// i.e. nil results with a nil error, so callers checking only the error
// would silently receive nil batch/chunks. Do not rely on this function
// until the TODO below is resolved.
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
	// TODO: implement this function.
	return nil, nil, nil
}
|
||||
|
||||
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
// The estimators in this file deliberately treat every calldata byte as
// non-zero, which over-approximates the true cost (zero bytes are cheaper).
const CalldataNonZeroByteGas = 16
|
||||
|
||||
// GetKeccak256Gas calculates the gas cost for computing the keccak256 hash of a given size.
|
||||
func GetKeccak256Gas(size uint64) uint64 {
|
||||
return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
|
||||
}
|
||||
|
||||
// GetMemoryExpansionCost calculates the cost of memory expansion for a given memoryByteSize.
//
// With words = ceil(memoryByteSize / 32), the cost is
// words^2/512 + 3*words (integer division).
func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
	words := (memoryByteSize + 31) / 32
	return (words*words)/512 + 3*words
}
|
||||
|
||||
// EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately.
|
||||
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
|
||||
// This needs to be adjusted in the future.
|
||||
func EstimateBlockL1CommitCalldataSize(b *encoding.Block) (uint64, error) {
|
||||
var size uint64
|
||||
for _, txData := range b.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
continue
|
||||
}
|
||||
size += 4 // 4 bytes payload length
|
||||
txPayloadLength, err := getTxPayloadLength(txData)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
size += txPayloadLength
|
||||
}
|
||||
size += 60 // 60 bytes BlockContext
|
||||
return size, nil
|
||||
}
|
||||
|
||||
// EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately.
|
||||
func EstimateBlockL1CommitGas(b *encoding.Block) (uint64, error) {
|
||||
var total uint64
|
||||
var numL1Messages uint64
|
||||
for _, txData := range b.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
numL1Messages++
|
||||
continue
|
||||
}
|
||||
|
||||
txPayloadLength, err := getTxPayloadLength(txData)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
|
||||
total += CalldataNonZeroByteGas * 4 // 4 bytes payload length
|
||||
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
|
||||
}
|
||||
|
||||
// 60 bytes BlockContext calldata
|
||||
total += CalldataNonZeroByteGas * 60
|
||||
|
||||
// sload
|
||||
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
|
||||
|
||||
// staticcall
|
||||
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
|
||||
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
|
||||
|
||||
total += GetMemoryExpansionCost(36) * numL1Messages // staticcall to proxy
|
||||
total += 100 * numL1Messages // read admin in proxy
|
||||
total += 100 * numL1Messages // read impl in proxy
|
||||
total += 100 * numL1Messages // access impl
|
||||
total += GetMemoryExpansionCost(36) * numL1Messages // delegatecall to impl
|
||||
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately.
|
||||
func EstimateChunkL1CommitCalldataSize(c *encoding.Chunk) (uint64, error) {
|
||||
var totalL1CommitCalldataSize uint64
|
||||
for _, block := range c.Blocks {
|
||||
blockL1CommitCalldataSize, err := EstimateBlockL1CommitCalldataSize(block)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
totalL1CommitCalldataSize += blockL1CommitCalldataSize
|
||||
}
|
||||
return totalL1CommitCalldataSize, nil
|
||||
}
|
||||
|
||||
// EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately.
|
||||
func EstimateChunkL1CommitGas(c *encoding.Chunk) (uint64, error) {
|
||||
var totalTxNum uint64
|
||||
var totalL1CommitGas uint64
|
||||
for _, block := range c.Blocks {
|
||||
totalTxNum += uint64(len(block.Transactions))
|
||||
blockL1CommitGas, err := EstimateBlockL1CommitGas(block)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
totalL1CommitGas += blockL1CommitGas
|
||||
}
|
||||
|
||||
numBlocks := uint64(len(c.Blocks))
|
||||
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
|
||||
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
|
||||
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
|
||||
|
||||
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
|
||||
return totalL1CommitGas, nil
|
||||
}
|
||||
|
||||
// EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately.
//
// The estimate is the sum of fixed per-transaction overhead, parent-batch
// header costs, the batch data hash cost, and the per-chunk gas estimates
// (including skipped-L1-message bitmap calldata and hashing per chunk).
func EstimateBatchL1CommitGas(b *encoding.Batch) (uint64, error) {
	var totalL1CommitGas uint64

	// Add extra gas costs
	totalL1CommitGas += 100000 // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc
	totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
	totalL1CommitGas += 20000 // 1 time sstore
	totalL1CommitGas += 21000 // base fee for tx
	totalL1CommitGas += CalldataNonZeroByteGas // version in calldata

	// adjusting gas:
	// add 1 time cold sload (2100 gas) for L1MessageQueue
	// add 1 time cold address access (2600 gas) for L1MessageQueue
	// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
	totalL1CommitGas += (2100 + 2600 - 100 - 100)
	totalL1CommitGas += GetKeccak256Gas(89 + 32) // parent batch header hash, length is estimated as 89 (constant part)+ 32 (1 skippedL1MessageBitmap)
	totalL1CommitGas += CalldataNonZeroByteGas * (89 + 32) // parent batch header in calldata

	// adjust batch data hash gas cost: keccak over one 32-byte chunk hash per chunk
	totalL1CommitGas += GetKeccak256Gas(uint64(32 * len(b.Chunks)))

	// running count of L1 messages consumed before each chunk; needed because
	// NumL1Messages for a chunk depends on how many were popped before it
	totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore

	for _, chunk := range b.Chunks {
		chunkL1CommitGas, err := EstimateChunkL1CommitGas(chunk)
		if err != nil {
			return 0, err
		}
		totalL1CommitGas += chunkL1CommitGas

		totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore)
		totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk

		// skipped-L1-message bitmap: one 32-byte word per 256 messages (rounded up),
		// charged as calldata and as part of the batch header hash (89-byte header + bitmap)
		totalL1CommitGas += CalldataNonZeroByteGas * (32 * (totalL1MessagePoppedInChunk + 255) / 256)
		totalL1CommitGas += GetKeccak256Gas(89 + 32*(totalL1MessagePoppedInChunk+255)/256)

		totalL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
		if err != nil {
			return 0, err
		}
		// memory expansion for loading the chunk calldata
		totalL1CommitGas += GetMemoryExpansionCost(totalL1CommitCalldataSize)
	}

	return totalL1CommitGas, nil
}
|
||||
|
||||
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
|
||||
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) (uint64, error) {
|
||||
var totalL1CommitCalldataSize uint64
|
||||
for _, chunk := range b.Chunks {
|
||||
chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
totalL1CommitCalldataSize += chunkL1CommitCalldataSize
|
||||
}
|
||||
return totalL1CommitCalldataSize, nil
|
||||
}
|
||||
|
||||
func getTxPayloadLength(txData *types.TransactionData) (uint64, error) {
|
||||
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(txData)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64(len(rlpTxData)), nil
|
||||
}
|
||||
597
common/types/encoding/codecv0/codecv0_test.go
Normal file
597
common/types/encoding/codecv0/codecv0_test.go
Normal file
@@ -0,0 +1,597 @@
|
||||
package codecv0
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types/encoding"
|
||||
)
|
||||
|
||||
func TestCodecV0(t *testing.T) {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
parentDABatch, err := NewDABatch(&encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: nil,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
parentBatchHash := parentDABatch.Hash()
|
||||
|
||||
block1 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
block2 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
block3 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
|
||||
block4 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
|
||||
block5 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
|
||||
block6 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
|
||||
|
||||
blockL1CommitCalldataSize, err := EstimateBlockL1CommitCalldataSize(block1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(298), blockL1CommitCalldataSize)
|
||||
blockL1CommitGas, err := EstimateBlockL1CommitGas(block1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(4900), blockL1CommitGas)
|
||||
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(5745), blockL1CommitCalldataSize)
|
||||
blockL1CommitGas, err = EstimateBlockL1CommitGas(block2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(93613), blockL1CommitGas)
|
||||
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block3)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(96), blockL1CommitCalldataSize)
|
||||
blockL1CommitGas, err = EstimateBlockL1CommitGas(block3)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(4187), blockL1CommitGas)
|
||||
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block4)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), blockL1CommitCalldataSize)
|
||||
blockL1CommitGas, err = EstimateBlockL1CommitGas(block4)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(14020), blockL1CommitGas)
|
||||
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block5)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), blockL1CommitCalldataSize)
|
||||
blockL1CommitGas, err = EstimateBlockL1CommitGas(block5)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(8796), blockL1CommitGas)
|
||||
blockL1CommitCalldataSize, err = EstimateBlockL1CommitCalldataSize(block6)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), blockL1CommitCalldataSize)
|
||||
blockL1CommitGas, err = EstimateBlockL1CommitGas(block6)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6184), blockL1CommitGas)
|
||||
|
||||
// Test case: when the batch and chunk contains one block.
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block1},
|
||||
}
|
||||
chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(298), chunkL1CommitCalldataSize)
|
||||
chunkL1CommitGas, err := EstimateChunkL1CommitGas(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6042), chunkL1CommitGas)
|
||||
|
||||
daChunk, err := NewDAChunk(chunk, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes, err := daChunk.Encode()
|
||||
assert.NoError(t, err)
|
||||
chunkHexString := hex.EncodeToString(chunkBytes)
|
||||
assert.Equal(t, 299, len(chunkBytes))
|
||||
assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000001de9000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", chunkHexString)
|
||||
daChunkHash, err := daChunk.Hash()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, common.HexToHash("0xde642c68122634b33fa1e6e4243b17be3bfd0dc6f996f204ef6d7522516bd840"), daChunkHash)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: parentBatchHash,
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
batchL1CommitCalldataSize, err := EstimateBatchL1CommitCalldataSize(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(298), batchL1CommitCalldataSize)
|
||||
batchL1CommitGas, err := EstimateBatchL1CommitGas(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(162591), batchL1CommitGas)
|
||||
|
||||
daBatch, err := NewDABatch(batch)
|
||||
assert.NoError(t, err)
|
||||
batchBytes := daBatch.Encode()
|
||||
batchHexString := hex.EncodeToString(batchBytes)
|
||||
assert.Equal(t, 89, len(batchBytes))
|
||||
assert.Equal(t, "000000000000000001000000000000000000000000000000008fbc5eecfefc5bd9d1618ecef1fed160a7838448383595a2257d4c9bd5c5fa3eb0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab0", batchHexString)
|
||||
assert.Equal(t, 0, len(daBatch.SkippedL1MessageBitmap))
|
||||
assert.Equal(t, uint64(0), daBatch.TotalL1MessagePopped)
|
||||
assert.Equal(t, uint64(0), daBatch.L1MessagePopped)
|
||||
assert.Equal(t, common.HexToHash("0xa906c7d2b6b68ea5fec3ff9d60d41858676e0d365e5d5ef07b2ce20fcf24ecd7"), daBatch.Hash())
|
||||
|
||||
decodedDABatch, err := NewDABatchFromBytes(batchBytes)
|
||||
assert.NoError(t, err)
|
||||
decodedBatchBytes := decodedDABatch.Encode()
|
||||
decodedBatchHexString := hex.EncodeToString(decodedBatchBytes)
|
||||
assert.Equal(t, batchHexString, decodedBatchHexString)
|
||||
|
||||
// Test case: when the batch and chunk contains two block.
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block1, block2},
|
||||
}
|
||||
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6043), chunkL1CommitCalldataSize)
|
||||
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100742), chunkL1CommitGas)
|
||||
|
||||
daChunk, err = NewDAChunk(chunk, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes, err = daChunk.Encode()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 6044, len(chunkBytes))
|
||||
daChunkHash, err = daChunk.Hash()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, common.HexToHash("0x014916a83eccdb0d01e814b4d4ab90eb9049ba9a3cb0994919b86ad873bcd028"), daChunkHash)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: parentBatchHash,
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6043), batchL1CommitCalldataSize)
|
||||
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(257897), batchL1CommitGas)
|
||||
|
||||
daBatch, err = NewDABatch(batch)
|
||||
assert.NoError(t, err)
|
||||
batchBytes = daBatch.Encode()
|
||||
batchHexString = hex.EncodeToString(batchBytes)
|
||||
assert.Equal(t, 89, len(batchBytes))
|
||||
assert.Equal(t, "0000000000000000010000000000000000000000000000000074dd561a36921590926bee01fd0d53747c5f3e48e48a2d5538b9ab0e1511cfd7b0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab0", batchHexString)
|
||||
assert.Equal(t, 0, len(daBatch.SkippedL1MessageBitmap))
|
||||
assert.Equal(t, uint64(0), daBatch.TotalL1MessagePopped)
|
||||
assert.Equal(t, uint64(0), daBatch.L1MessagePopped)
|
||||
assert.Equal(t, common.HexToHash("0xb02e39b740756824d20b2cac322ac365121411ced9d6e34de98a0b247c6e23e6"), daBatch.Hash())
|
||||
|
||||
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
|
||||
assert.NoError(t, err)
|
||||
decodedBatchBytes = decodedDABatch.Encode()
|
||||
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
|
||||
assert.Equal(t, batchHexString, decodedBatchHexString)
|
||||
|
||||
// Test case: when the chunk contains one block with 1 L1MsgTx.
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block3},
|
||||
}
|
||||
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(96), chunkL1CommitCalldataSize)
|
||||
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(5329), chunkL1CommitGas)
|
||||
|
||||
daChunk, err = NewDAChunk(chunk, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes, err = daChunk.Encode()
|
||||
assert.NoError(t, err)
|
||||
chunkHexString = hex.EncodeToString(chunkBytes)
|
||||
assert.Equal(t, 97, len(chunkBytes))
|
||||
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", chunkHexString)
|
||||
daChunkHash, err = daChunk.Hash()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, common.HexToHash("0x9e643c8a9203df542e39d9bfdcb07c99575b3c3d557791329fef9d83cc4147d0"), daChunkHash)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: parentBatchHash,
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(96), batchL1CommitCalldataSize)
|
||||
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(161889), batchL1CommitGas)
|
||||
|
||||
daBatch, err = NewDABatch(batch)
|
||||
assert.NoError(t, err)
|
||||
batchBytes = daBatch.Encode()
|
||||
batchHexString = hex.EncodeToString(batchBytes)
|
||||
assert.Equal(t, 121, len(batchBytes))
|
||||
assert.Equal(t, "000000000000000001000000000000000b000000000000000b34f419ce7e882295bdb5aec6cce56ffa788a5fed4744d7fbd77e4acbf409f1cab0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab000000000000000000000000000000000000000000000000000000000000003ff", batchHexString)
|
||||
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
|
||||
expectedBitmap := "00000000000000000000000000000000000000000000000000000000000003ff"
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
|
||||
assert.Equal(t, uint64(11), daBatch.TotalL1MessagePopped)
|
||||
assert.Equal(t, uint64(11), daBatch.L1MessagePopped)
|
||||
assert.Equal(t, common.HexToHash("0xa18f07cb56ab4f2db5914d9b5699c5932bea4b5c73e71c8cec79151c11e9e986"), daBatch.Hash())
|
||||
|
||||
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
|
||||
assert.NoError(t, err)
|
||||
decodedBatchBytes = decodedDABatch.Encode()
|
||||
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
|
||||
assert.Equal(t, batchHexString, decodedBatchHexString)
|
||||
|
||||
// Test case: batch contains multiple chunks, chunk contains multiple blocks.
|
||||
chunk1 := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block1, block2, block3},
|
||||
}
|
||||
chunk1L1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6139), chunk1L1CommitCalldataSize)
|
||||
chunk1L1CommitGas, err := EstimateChunkL1CommitGas(chunk1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(106025), chunk1L1CommitGas)
|
||||
|
||||
daChunk1, err := NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes1, err := daChunk1.Encode()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 6140, len(chunkBytes1))
|
||||
|
||||
chunk2 := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block4},
|
||||
}
|
||||
chunk2L1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), chunk2L1CommitCalldataSize)
|
||||
chunk2L1CommitGas, err := EstimateChunkL1CommitGas(chunk2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(15189), chunk2L1CommitGas)
|
||||
|
||||
daChunk2, err := NewDAChunk(chunk2, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes2, err := daChunk2.Encode()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 61, len(chunkBytes2))
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: parentBatchHash,
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6199), batchL1CommitCalldataSize)
|
||||
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(279054), batchL1CommitGas)
|
||||
|
||||
daBatch, err = NewDABatch(batch)
|
||||
assert.NoError(t, err)
|
||||
batchBytes = daBatch.Encode()
|
||||
batchHexString = hex.EncodeToString(batchBytes)
|
||||
assert.Equal(t, 121, len(batchBytes))
|
||||
assert.Equal(t, "000000000000000001000000000000002a000000000000002a1f9b3d942a6ee14e7afc52225c91fa44faa0a7ec511df9a2d9348d33bcd142fcb0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab00000000000000000000000000000000000000000000000000000001ffffffbff", batchHexString)
|
||||
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
|
||||
expectedBitmap = "0000000000000000000000000000000000000000000000000000001ffffffbff"
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
|
||||
assert.Equal(t, uint64(42), daBatch.TotalL1MessagePopped)
|
||||
assert.Equal(t, uint64(42), daBatch.L1MessagePopped)
|
||||
assert.Equal(t, common.HexToHash("0xf7bd6afe02764e4e6df23a374d753182b57fa77be71aaf1cd8365e15a51872d1"), daBatch.Hash())
|
||||
|
||||
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
|
||||
assert.NoError(t, err)
|
||||
decodedBatchBytes = decodedDABatch.Encode()
|
||||
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
|
||||
assert.Equal(t, batchHexString, decodedBatchHexString)
|
||||
|
||||
// Test case: many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs.
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block4},
|
||||
}
|
||||
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), chunkL1CommitCalldataSize)
|
||||
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(15189), chunkL1CommitGas)
|
||||
|
||||
daChunk, err = NewDAChunk(chunk, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes, err = daChunk.Encode()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 61, len(chunkBytes))
|
||||
daChunkHash, err = daChunk.Hash()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, common.HexToHash("0x854fc3136f47ce482ec85ee3325adfa16a1a1d60126e1c119eaaf0c3a9e90f8e"), daChunkHash)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 37,
|
||||
ParentBatchHash: parentBatchHash,
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), batchL1CommitCalldataSize)
|
||||
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(171730), batchL1CommitGas)
|
||||
|
||||
daBatch, err = NewDABatch(batch)
|
||||
assert.NoError(t, err)
|
||||
batchBytes = daBatch.Encode()
|
||||
batchHexString = hex.EncodeToString(batchBytes)
|
||||
assert.Equal(t, 121, len(batchBytes))
|
||||
assert.Equal(t, "0000000000000000010000000000000005000000000000002ac62fb58ec2d5393e00960f1cc23cab883b685296efa03d13ea2dd4c6de79cc55b0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab00000000000000000000000000000000000000000000000000000000000000000", batchHexString)
|
||||
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
|
||||
expectedBitmap = "0000000000000000000000000000000000000000000000000000000000000000"
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
|
||||
assert.Equal(t, uint64(42), daBatch.TotalL1MessagePopped)
|
||||
assert.Equal(t, uint64(5), daBatch.L1MessagePopped)
|
||||
assert.Equal(t, common.HexToHash("0x841f4657b7eb723cae35377cf2963b51191edad6a3b182d4c8524cb928d2a413"), daBatch.Hash())
|
||||
|
||||
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
|
||||
assert.NoError(t, err)
|
||||
decodedBatchBytes = decodedDABatch.Encode()
|
||||
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
|
||||
assert.Equal(t, batchHexString, decodedBatchHexString)
|
||||
|
||||
// Test case: many consecutive L1 Msgs in 1 bitmap, with leading skipped msgs.
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block4},
|
||||
}
|
||||
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), chunkL1CommitCalldataSize)
|
||||
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(15189), chunkL1CommitGas)
|
||||
|
||||
daChunk, err = NewDAChunk(chunk, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes, err = daChunk.Encode()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 61, len(chunkBytes))
|
||||
daChunkHash, err = daChunk.Hash()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, common.HexToHash("0x854fc3136f47ce482ec85ee3325adfa16a1a1d60126e1c119eaaf0c3a9e90f8e"), daChunkHash)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: parentBatchHash,
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), batchL1CommitCalldataSize)
|
||||
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(171810), batchL1CommitGas)
|
||||
|
||||
daBatch, err = NewDABatch(batch)
|
||||
assert.NoError(t, err)
|
||||
batchBytes = daBatch.Encode()
|
||||
batchHexString = hex.EncodeToString(batchBytes)
|
||||
assert.Equal(t, 121, len(batchBytes))
|
||||
assert.Equal(t, "000000000000000001000000000000002a000000000000002a93255aa24dd468c5645f1e6901b8131a7a78a0eeb2a17cbb09ba64688a8de6b4b0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab00000000000000000000000000000000000000000000000000000001fffffffff", batchHexString)
|
||||
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
|
||||
expectedBitmap = "0000000000000000000000000000000000000000000000000000001fffffffff"
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
|
||||
assert.Equal(t, uint64(42), daBatch.TotalL1MessagePopped)
|
||||
assert.Equal(t, uint64(42), daBatch.L1MessagePopped)
|
||||
assert.Equal(t, common.HexToHash("0xa28766a3617cf244cc397fc4ce4c23022ec80f152b9f618807ac7e7c11486612"), daBatch.Hash())
|
||||
|
||||
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
|
||||
assert.NoError(t, err)
|
||||
decodedBatchBytes = decodedDABatch.Encode()
|
||||
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
|
||||
assert.Equal(t, batchHexString, decodedBatchHexString)
|
||||
|
||||
// Test case: many sparse L1 Msgs in 1 bitmap.
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block5},
|
||||
}
|
||||
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), chunkL1CommitCalldataSize)
|
||||
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(9947), chunkL1CommitGas)
|
||||
|
||||
daChunk, err = NewDAChunk(chunk, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes, err = daChunk.Encode()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 61, len(chunkBytes))
|
||||
daChunkHash, err = daChunk.Hash()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, common.HexToHash("0x2aa220ca7bd1368e59e8053eb3831e30854aa2ec8bd3af65cee350c1c0718ba6"), daChunkHash)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: parentBatchHash,
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), batchL1CommitCalldataSize)
|
||||
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(166504), batchL1CommitGas)
|
||||
|
||||
daBatch, err = NewDABatch(batch)
|
||||
assert.NoError(t, err)
|
||||
batchBytes = daBatch.Encode()
|
||||
batchHexString = hex.EncodeToString(batchBytes)
|
||||
assert.Equal(t, 121, len(batchBytes))
|
||||
assert.Equal(t, "000000000000000001000000000000000a000000000000000ac7bcc8da943dd83404e84d9ce7e894ab97ce4829df4eb51ebbbe13c90b5a3f4db0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab000000000000000000000000000000000000000000000000000000000000001dd", batchHexString)
|
||||
assert.Equal(t, 32, len(daBatch.SkippedL1MessageBitmap))
|
||||
expectedBitmap = "00000000000000000000000000000000000000000000000000000000000001dd"
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
|
||||
assert.Equal(t, uint64(10), daBatch.TotalL1MessagePopped)
|
||||
assert.Equal(t, uint64(10), daBatch.L1MessagePopped)
|
||||
assert.Equal(t, common.HexToHash("0x2fee2073639eb9795007f7e765b3318f92658822de40b2134d34a478a0e9058a"), daBatch.Hash())
|
||||
|
||||
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
|
||||
assert.NoError(t, err)
|
||||
decodedBatchBytes = decodedDABatch.Encode()
|
||||
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
|
||||
assert.Equal(t, batchHexString, decodedBatchHexString)
|
||||
|
||||
// Test case: many L1 Msgs in each of 2 bitmaps.
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block6},
|
||||
}
|
||||
chunkL1CommitCalldataSize, err = EstimateChunkL1CommitCalldataSize(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), chunkL1CommitCalldataSize)
|
||||
chunkL1CommitGas, err = EstimateChunkL1CommitGas(chunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(7326), chunkL1CommitGas)
|
||||
|
||||
daChunk, err = NewDAChunk(chunk, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkBytes, err = daChunk.Encode()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 61, len(chunkBytes))
|
||||
daChunkHash, err = daChunk.Hash()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, common.HexToHash("0xb65521bea7daff75838de07951c3c055966750fb5a270fead5e0e727c32455c3"), daChunkHash)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: parentBatchHash,
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
batchL1CommitCalldataSize, err = EstimateBatchL1CommitCalldataSize(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(60), batchL1CommitCalldataSize)
|
||||
batchL1CommitGas, err = EstimateBatchL1CommitGas(batch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(164388), batchL1CommitGas)
|
||||
|
||||
daBatch, err = NewDABatch(batch)
|
||||
assert.NoError(t, err)
|
||||
batchBytes = daBatch.Encode()
|
||||
batchHexString = hex.EncodeToString(batchBytes)
|
||||
assert.Equal(t, 153, len(batchBytes))
|
||||
assert.Equal(t, "00000000000000000100000000000001010000000000000101899a411a3309c6491701b7b955c7b1115ac015414bbb71b59a0ca561668d5208b0a62a3048a2e6efb4e56e471eb826de86f8ccaa4af27c572b68db6f687b3ab0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd0000000000000000000000000000000000000000000000000000000000000000", batchHexString)
|
||||
assert.Equal(t, 64, len(daBatch.SkippedL1MessageBitmap))
|
||||
expectedBitmap = "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd0000000000000000000000000000000000000000000000000000000000000000"
|
||||
assert.Equal(t, expectedBitmap, common.Bytes2Hex(daBatch.SkippedL1MessageBitmap))
|
||||
assert.Equal(t, uint64(257), daBatch.TotalL1MessagePopped)
|
||||
assert.Equal(t, uint64(257), daBatch.L1MessagePopped)
|
||||
assert.Equal(t, common.HexToHash("0x84206bc6d0076a233fc7120a0bec4e03bf2512207437768828384dddb335ba2e"), daBatch.Hash())
|
||||
|
||||
decodedDABatch, err = NewDABatchFromBytes(batchBytes)
|
||||
assert.NoError(t, err)
|
||||
decodedBatchBytes = decodedDABatch.Encode()
|
||||
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
|
||||
assert.Equal(t, batchHexString, decodedBatchHexString)
|
||||
}
|
||||
|
||||
func TestErrorPaths(t *testing.T) {
|
||||
// Test case: when the chunk is nil.
|
||||
_, err := NewDAChunk(nil, 100)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "chunk is nil")
|
||||
|
||||
// Test case: when the chunk contains no blocks.
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{},
|
||||
}
|
||||
_, err = NewDAChunk(chunk, 0)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "number of blocks is 0")
|
||||
|
||||
// Test case: when the chunk contains more than 255 blocks.
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{},
|
||||
}
|
||||
for i := 0; i < 256; i++ {
|
||||
chunk.Blocks = append(chunk.Blocks, &encoding.Block{})
|
||||
}
|
||||
_, err = NewDAChunk(chunk, 0)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")
|
||||
|
||||
// Test case: Header.Number is not a uint64.
|
||||
block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
block.Header.Number = new(big.Int).Lsh(block.Header.Number, 64)
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
_, err = NewDAChunk(chunk, 0)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "block number is not uint64")
|
||||
|
||||
// Test case: number of transactions exceeds max uint16.
|
||||
block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
for i := 0; i < 65537; i++ {
|
||||
block.Transactions = append(block.Transactions, block.Transactions[0])
|
||||
}
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
_, err = NewDAChunk(chunk, 0)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
|
||||
|
||||
// Test case: decode transaction with hex string without 0x prefix error.
|
||||
block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
block.Transactions = block.Transactions[:1]
|
||||
block.Transactions[0].Data = "not-a-hex"
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
_, err = EstimateChunkL1CommitCalldataSize(chunk)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "hex string without 0x prefix")
|
||||
_, err = EstimateChunkL1CommitGas(chunk)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "hex string without 0x prefix")
|
||||
|
||||
// Test case: number of L1 messages exceeds max uint16.
|
||||
block = readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
|
||||
for i := 0; i < 65535; i++ {
|
||||
tx := &block.Transactions[i]
|
||||
txCopy := *tx
|
||||
txCopy.Nonce = uint64(i + 1)
|
||||
block.Transactions = append(block.Transactions, txCopy)
|
||||
}
|
||||
chunk = &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
_, err = NewDAChunk(chunk, 0)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
|
||||
}
|
||||
|
||||
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
|
||||
data, err := os.ReadFile(filename)
|
||||
assert.NoError(t, err)
|
||||
|
||||
block := &encoding.Block{}
|
||||
assert.NoError(t, json.Unmarshal(data, block))
|
||||
return block
|
||||
}
|
||||
503
common/types/encoding/codecv1/codecv1.go
Normal file
503
common/types/encoding/codecv1/codecv1.go
Normal file
@@ -0,0 +1,503 @@
|
||||
package codecv1
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/types/encoding"
|
||||
)
|
||||
|
||||
var (
	// BLSModulus is the BLS modulus defined in EIP-4844
	// (the scalar field order of BLS12-381); initialized in init().
	BLSModulus *big.Int

	// BlobDataProofArgs defines the argument types for `_blobDataProof` in
	// `finalizeBatchWithProof4844`; initialized in init().
	BlobDataProofArgs abi.Arguments

	// MaxNumChunks is the maximum number of chunks that a batch can contain.
	// The blob metadata section reserves a 4-byte size slot per chunk.
	MaxNumChunks int = 15
)
|
||||
|
||||
func init() {
|
||||
// initialize modulus
|
||||
modulus, success := new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)
|
||||
if !success {
|
||||
log.Crit("BLSModulus conversion failed")
|
||||
}
|
||||
BLSModulus = modulus
|
||||
|
||||
// initialize arguments
|
||||
bytes32Type, err1 := abi.NewType("bytes32", "bytes32", nil)
|
||||
bytes48Type, err2 := abi.NewType("bytes48", "bytes48", nil)
|
||||
if err1 != nil || err2 != nil {
|
||||
log.Crit("Failed to initialize abi types", "err1", err1, "err2", err2)
|
||||
}
|
||||
|
||||
BlobDataProofArgs = abi.Arguments{
|
||||
{Type: bytes32Type, Name: "z"},
|
||||
{Type: bytes32Type, Name: "y"},
|
||||
{Type: bytes48Type, Name: "commitment"},
|
||||
{Type: bytes48Type, Name: "proof"},
|
||||
}
|
||||
}
|
||||
|
||||
// CodecV1Version denotes the version of the codec.
const CodecV1Version = 1

// DABlock represents a Data Availability Block.
// It is serialized into a fixed 60-byte layout by Encode.
type DABlock struct {
	BlockNumber     uint64   // L2 block number
	Timestamp       uint64   // L2 block timestamp
	BaseFee         *big.Int // base fee; may be nil, in which case the field is encoded as zero
	GasLimit        uint64   // block gas limit
	NumTransactions uint16   // total transactions in the block (L1 + L2, including skipped L1 messages)
	NumL1Messages   uint16   // L1 messages in the block (included + skipped)
}

// DAChunk groups consecutive DABlocks with their transactions.
type DAChunk struct {
	Blocks       []*DABlock
	Transactions [][]*types.TransactionData // per-block transaction lists, parallel to Blocks
}

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
	// header fields, serialized by Encode
	Version                uint8
	BatchIndex             uint64
	L1MessagePopped        uint64 // L1 messages popped by this batch
	TotalL1MessagePopped   uint64 // cumulative L1 messages popped after this batch
	DataHash               common.Hash
	BlobVersionedHash      common.Hash
	ParentBatchHash        common.Hash
	SkippedL1MessageBitmap []byte

	// blob payload; populated by NewDABatch, left empty by NewDABatchFromBytes
	blob *kzg4844.Blob
	z    *kzg4844.Point
}
|
||||
|
||||
// NewDABlock creates a new DABlock from the given encoding.Block and the total number of L1 messages popped before.
|
||||
func NewDABlock(block *encoding.Block, totalL1MessagePoppedBefore uint64) (*DABlock, error) {
|
||||
if !block.Header.Number.IsUint64() {
|
||||
return nil, errors.New("block number is not uint64")
|
||||
}
|
||||
|
||||
// note: numL1Messages includes skipped messages
|
||||
numL1Messages := block.NumL1Messages(totalL1MessagePoppedBefore)
|
||||
if numL1Messages > math.MaxUint16 {
|
||||
return nil, errors.New("number of L1 messages exceeds max uint16")
|
||||
}
|
||||
|
||||
// note: numTransactions includes skipped messages
|
||||
numL2Transactions := block.NumL2Transactions()
|
||||
numTransactions := numL1Messages + numL2Transactions
|
||||
if numTransactions > math.MaxUint16 {
|
||||
return nil, errors.New("number of transactions exceeds max uint16")
|
||||
}
|
||||
|
||||
daBlock := DABlock{
|
||||
BlockNumber: block.Header.Number.Uint64(),
|
||||
Timestamp: block.Header.Time,
|
||||
BaseFee: block.Header.BaseFee,
|
||||
GasLimit: block.Header.GasLimit,
|
||||
NumTransactions: uint16(numTransactions),
|
||||
NumL1Messages: uint16(numL1Messages),
|
||||
}
|
||||
|
||||
return &daBlock, nil
|
||||
}
|
||||
|
||||
// Encode serializes the DABlock into a slice of bytes.
|
||||
func (b *DABlock) Encode() []byte {
|
||||
bytes := make([]byte, 60)
|
||||
binary.BigEndian.PutUint64(bytes[0:], b.BlockNumber)
|
||||
binary.BigEndian.PutUint64(bytes[8:], b.Timestamp)
|
||||
if b.BaseFee != nil {
|
||||
binary.BigEndian.PutUint64(bytes[40:], b.BaseFee.Uint64())
|
||||
}
|
||||
binary.BigEndian.PutUint64(bytes[48:], b.GasLimit)
|
||||
binary.BigEndian.PutUint16(bytes[56:], b.NumTransactions)
|
||||
binary.BigEndian.PutUint16(bytes[58:], b.NumL1Messages)
|
||||
return bytes
|
||||
}
|
||||
|
||||
// NewDAChunk creates a new DAChunk from the given encoding.Chunk and the total number of L1 messages popped before.
|
||||
func NewDAChunk(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64) (*DAChunk, error) {
|
||||
var blocks []*DABlock
|
||||
var txs [][]*types.TransactionData
|
||||
|
||||
for _, block := range chunk.Blocks {
|
||||
b, err := NewDABlock(block, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blocks = append(blocks, b)
|
||||
totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
|
||||
txs = append(txs, block.Transactions)
|
||||
}
|
||||
|
||||
daChunk := DAChunk{
|
||||
Blocks: blocks,
|
||||
Transactions: txs,
|
||||
}
|
||||
|
||||
return &daChunk, nil
|
||||
}
|
||||
|
||||
// Encode serializes the DAChunk into a slice of bytes.
|
||||
func (c *DAChunk) Encode() []byte {
|
||||
var chunkBytes []byte
|
||||
chunkBytes = append(chunkBytes, byte(len(c.Blocks)))
|
||||
|
||||
for _, block := range c.Blocks {
|
||||
blockBytes := block.Encode()
|
||||
chunkBytes = append(chunkBytes, blockBytes...)
|
||||
}
|
||||
|
||||
return chunkBytes
|
||||
}
|
||||
|
||||
// Hash computes the hash of the DAChunk data.
|
||||
func (c *DAChunk) Hash() (common.Hash, error) {
|
||||
var dataBytes []byte
|
||||
|
||||
// concatenate block contexts
|
||||
for _, block := range c.Blocks {
|
||||
encodedBlock := block.Encode()
|
||||
// only the first 58 bytes are used in the hashing process
|
||||
dataBytes = append(dataBytes, encodedBlock[:58]...)
|
||||
}
|
||||
|
||||
// concatenate l1 tx hashes
|
||||
for _, blockTxs := range c.Transactions {
|
||||
for _, txData := range blockTxs {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
txHash := strings.TrimPrefix(txData.TxHash, "0x")
|
||||
hashBytes, err := hex.DecodeString(txHash)
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
if len(hashBytes) != 32 {
|
||||
return common.Hash{}, fmt.Errorf("unexpected hash: %s", txData.TxHash)
|
||||
}
|
||||
dataBytes = append(dataBytes, hashBytes...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
hash := crypto.Keccak256Hash(dataBytes)
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
// NewDABatch creates a DABatch from the provided encoding.Batch.
|
||||
func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
|
||||
// this encoding can only support a fixed number of chunks per batch
|
||||
if len(batch.Chunks) > MaxNumChunks {
|
||||
return nil, fmt.Errorf("too many chunks in batch")
|
||||
}
|
||||
|
||||
if len(batch.Chunks) == 0 {
|
||||
return nil, fmt.Errorf("too few chunks in batch")
|
||||
}
|
||||
|
||||
// batch data hash
|
||||
dataHash, err := computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// skipped L1 messages bitmap
|
||||
bitmapBytes, totalL1MessagePoppedAfter, err := encoding.ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// blob payload
|
||||
blob, z, err := constructBlobPayload(batch.Chunks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// blob versioned hash
|
||||
c, err := kzg4844.BlobToCommitment(*blob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create blob commitment")
|
||||
}
|
||||
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
|
||||
|
||||
daBatch := DABatch{
|
||||
Version: CodecV1Version,
|
||||
BatchIndex: batch.Index,
|
||||
L1MessagePopped: totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore,
|
||||
TotalL1MessagePopped: totalL1MessagePoppedAfter,
|
||||
DataHash: dataHash,
|
||||
BlobVersionedHash: blobVersionedHash,
|
||||
ParentBatchHash: batch.ParentBatchHash,
|
||||
SkippedL1MessageBitmap: bitmapBytes,
|
||||
blob: blob,
|
||||
z: z,
|
||||
}
|
||||
|
||||
return &daBatch, nil
|
||||
}
|
||||
|
||||
// computeBatchDataHash computes the data hash of the batch.
|
||||
// Note: The batch hash and batch data hash are two different hashes,
|
||||
// the former is used for identifying a badge in the contracts,
|
||||
// the latter is used in the public input to the provers.
|
||||
func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore uint64) (common.Hash, error) {
|
||||
var dataBytes []byte
|
||||
totalL1MessagePoppedBeforeChunk := totalL1MessagePoppedBefore
|
||||
|
||||
for _, chunk := range chunks {
|
||||
daChunk, err := NewDAChunk(chunk, totalL1MessagePoppedBeforeChunk)
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
totalL1MessagePoppedBeforeChunk += chunk.NumL1Messages(totalL1MessagePoppedBeforeChunk)
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
dataBytes = append(dataBytes, chunkHash.Bytes()...)
|
||||
}
|
||||
|
||||
dataHash := crypto.Keccak256Hash(dataBytes)
|
||||
return dataHash, nil
|
||||
}
|
||||
|
||||
// constructBlobPayload constructs the 4844 blob payload.
|
||||
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
|
||||
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
|
||||
metadataLength := 2 + MaxNumChunks*4
|
||||
|
||||
// the raw (un-padded) blob payload
|
||||
blobBytes := make([]byte, metadataLength)
|
||||
|
||||
// challenge digest preimage
|
||||
// 1 hash for metadata and 1 for each chunk
|
||||
challengePreimage := make([]byte, (1+MaxNumChunks)*32)
|
||||
|
||||
// the challenge point z
|
||||
var z kzg4844.Point
|
||||
|
||||
// the chunk data hash used for calculating the challenge preimage
|
||||
var chunkDataHash common.Hash
|
||||
|
||||
// blob metadata: num_chunks
|
||||
binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunks)))
|
||||
|
||||
// encode blob metadata and L2 transactions,
|
||||
// and simultaneously also build challenge preimage
|
||||
for chunkID, chunk := range chunks {
|
||||
currentChunkStartIndex := len(blobBytes)
|
||||
|
||||
for _, block := range chunk.Blocks {
|
||||
for _, tx := range block.Transactions {
|
||||
if tx.Type != types.L1MessageTxType {
|
||||
// encode L2 txs into blob payload
|
||||
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
blobBytes = append(blobBytes, rlpTxData...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// blob metadata: chunki_size
|
||||
if chunkSize := len(blobBytes) - currentChunkStartIndex; chunkSize != 0 {
|
||||
binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
|
||||
}
|
||||
|
||||
// challenge: compute chunk data hash
|
||||
chunkDataHash = crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
|
||||
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
|
||||
}
|
||||
|
||||
// if we have fewer than MaxNumChunks chunks, the rest
|
||||
// of the blob metadata is correctly initialized to 0,
|
||||
// but we need to add padding to the challenge preimage
|
||||
for chunkID := len(chunks); chunkID < MaxNumChunks; chunkID++ {
|
||||
// use the last chunk's data hash as padding
|
||||
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
|
||||
}
|
||||
|
||||
// challenge: compute metadata hash
|
||||
hash := crypto.Keccak256Hash(blobBytes[0:metadataLength])
|
||||
copy(challengePreimage[0:], hash[:])
|
||||
|
||||
// convert raw data to BLSFieldElements
|
||||
blob, err := makeBlobCanonical(blobBytes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// compute z = challenge_digest % BLS_MODULUS
|
||||
challengeDigest := crypto.Keccak256Hash(challengePreimage[:])
|
||||
point := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
|
||||
copy(z[:], point.Bytes()[0:32])
|
||||
|
||||
return blob, &z, nil
|
||||
}
|
||||
|
||||
// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
|
||||
func makeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
|
||||
// blob contains 131072 bytes but we can only utilize 31/32 of these
|
||||
if len(blobBytes) > 126976 {
|
||||
return nil, fmt.Errorf("oversized batch payload")
|
||||
}
|
||||
|
||||
// the canonical (padded) blob payload
|
||||
var blob kzg4844.Blob
|
||||
|
||||
// encode blob payload by prepending every 31 bytes with 1 zero byte
|
||||
index := 0
|
||||
|
||||
for from := 0; from < len(blobBytes); from += 31 {
|
||||
to := from + 31
|
||||
if to > len(blobBytes) {
|
||||
to = len(blobBytes)
|
||||
}
|
||||
copy(blob[index+1:], blobBytes[from:to])
|
||||
index += 32
|
||||
}
|
||||
|
||||
return &blob, nil
|
||||
}
|
||||
|
||||
// NewDABatchFromBytes attempts to decode the given byte slice into a DABatch.
|
||||
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
|
||||
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
|
||||
if len(data) < 121 {
|
||||
return nil, fmt.Errorf("insufficient data for DABatch, expected at least 121 bytes but got %d", len(data))
|
||||
}
|
||||
|
||||
b := &DABatch{
|
||||
Version: data[0],
|
||||
BatchIndex: binary.BigEndian.Uint64(data[1:9]),
|
||||
L1MessagePopped: binary.BigEndian.Uint64(data[9:17]),
|
||||
TotalL1MessagePopped: binary.BigEndian.Uint64(data[17:25]),
|
||||
DataHash: common.BytesToHash(data[25:57]),
|
||||
BlobVersionedHash: common.BytesToHash(data[57:89]),
|
||||
ParentBatchHash: common.BytesToHash(data[89:121]),
|
||||
SkippedL1MessageBitmap: data[121:],
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Encode serializes the DABatch into bytes.
|
||||
func (b *DABatch) Encode() []byte {
|
||||
batchBytes := make([]byte, 121+len(b.SkippedL1MessageBitmap))
|
||||
batchBytes[0] = b.Version
|
||||
binary.BigEndian.PutUint64(batchBytes[1:], b.BatchIndex)
|
||||
binary.BigEndian.PutUint64(batchBytes[9:], b.L1MessagePopped)
|
||||
binary.BigEndian.PutUint64(batchBytes[17:], b.TotalL1MessagePopped)
|
||||
copy(batchBytes[25:], b.DataHash[:])
|
||||
copy(batchBytes[57:], b.BlobVersionedHash[:])
|
||||
copy(batchBytes[89:], b.ParentBatchHash[:])
|
||||
copy(batchBytes[121:], b.SkippedL1MessageBitmap[:])
|
||||
return batchBytes
|
||||
}
|
||||
|
||||
// Hash computes the hash of the serialized DABatch.
|
||||
func (b *DABatch) Hash() common.Hash {
|
||||
bytes := b.Encode()
|
||||
return crypto.Keccak256Hash(bytes)
|
||||
}
|
||||
|
||||
// BlobDataProof computes the abi-encoded blob verification data.
|
||||
func (b *DABatch) BlobDataProof() ([]byte, error) {
|
||||
if b.blob == nil {
|
||||
return nil, errors.New("called BlobDataProof with empty blob")
|
||||
}
|
||||
if b.z == nil {
|
||||
return nil, errors.New("called BlobDataProof with empty z")
|
||||
}
|
||||
|
||||
commitment, err := kzg4844.BlobToCommitment(*b.blob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create blob commitment")
|
||||
}
|
||||
|
||||
proof, y, err := kzg4844.ComputeProof(*b.blob, *b.z)
|
||||
if err != nil {
|
||||
log.Crit("failed to create KZG proof at point", "err", err, "z", hex.EncodeToString(b.z[:]))
|
||||
}
|
||||
|
||||
// Memory layout of ``_blobDataProof``:
|
||||
// | z | y | kzg_commitment | kzg_proof |
|
||||
// |---------|---------|----------------|-----------|
|
||||
// | bytes32 | bytes32 | bytes48 | bytes48 |
|
||||
|
||||
values := []interface{}{*b.z, y, commitment, proof}
|
||||
return BlobDataProofArgs.Pack(values...)
|
||||
}
|
||||
|
||||
// Blob returns the blob of the batch.
// It is nil for batches decoded via NewDABatchFromBytes, which leaves the
// blob-related fields unpopulated.
func (b *DABatch) Blob() *kzg4844.Blob {
	return b.blob
}
|
||||
|
||||
// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
// NOTE(review): this is currently an unimplemented stub — it returns
// (nil, nil, nil), i.e. a nil batch AND a nil error. Callers must not rely
// on it until it is implemented.
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
	// TODO: implement this function.
	return nil, nil, nil
}
|
||||
|
||||
// EstimateChunkL1CommitBlobSize estimates the size of the L1 commit blob for a single chunk.
|
||||
func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
|
||||
metadataSize := uint64(2 + 4*MaxNumChunks) // over-estimate: adding metadata length
|
||||
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
|
||||
return paddedSize, nil
|
||||
}
|
||||
|
||||
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
|
||||
func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
|
||||
metadataSize := uint64(2 + 4*MaxNumChunks)
|
||||
var batchDataSize uint64
|
||||
for _, c := range b.Chunks {
|
||||
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
batchDataSize += chunkDataSize
|
||||
}
|
||||
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
|
||||
return paddedSize, nil
|
||||
}
|
||||
|
||||
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
|
||||
var dataSize uint64
|
||||
for _, block := range c.Blocks {
|
||||
for _, tx := range block.Transactions {
|
||||
if tx.Type != types.L1MessageTxType {
|
||||
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
dataSize += uint64(len(rlpTxData))
|
||||
}
|
||||
}
|
||||
}
|
||||
return dataSize, nil
|
||||
}
|
||||
742
common/types/encoding/codecv1/codecv1_test.go
Normal file
742
common/types/encoding/codecv1/codecv1_test.go
Normal file
File diff suppressed because one or more lines are too long
222
common/types/encoding/da.go
Normal file
222
common/types/encoding/da.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package encoding
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// CodecVersion defines the version of encoder and decoder.
type CodecVersion int

const (
	// CodecV0 represents the version 0 of the encoder and decoder.
	CodecV0 CodecVersion = iota

	// CodecV1 represents the version 1 of the encoder and decoder.
	CodecV1
)

// Block represents an L2 block.
type Block struct {
	Header         *types.Header
	Transactions   []*types.TransactionData
	WithdrawRoot   common.Hash           `json:"withdraw_trie_root,omitempty"`
	RowConsumption *types.RowConsumption `json:"row_consumption,omitempty"` // may be nil; CrcMax rejects chunks with nil entries
}

// Chunk represents a group of blocks.
type Chunk struct {
	Blocks []*Block `json:"blocks"`
}

// Batch represents a batch of chunks.
type Batch struct {
	Index                      uint64      // batch index
	TotalL1MessagePoppedBefore uint64      // cumulative L1 messages popped before this batch
	ParentBatchHash            common.Hash // hash of the parent batch
	Chunks                     []*Chunk
}
|
||||
|
||||
// NumL1Messages returns the number of L1 messages in this block.
|
||||
// This number is the sum of included and skipped L1 messages.
|
||||
func (b *Block) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
|
||||
var lastQueueIndex *uint64
|
||||
for _, txData := range b.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
lastQueueIndex = &txData.Nonce
|
||||
}
|
||||
}
|
||||
if lastQueueIndex == nil {
|
||||
return 0
|
||||
}
|
||||
// note: last queue index included before this block is totalL1MessagePoppedBefore - 1
|
||||
// TODO: cache results
|
||||
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
|
||||
}
|
||||
|
||||
// NumL2Transactions returns the number of L2 transactions in this block.
|
||||
func (b *Block) NumL2Transactions() uint64 {
|
||||
var count uint64
|
||||
for _, txData := range b.Transactions {
|
||||
if txData.Type != types.L1MessageTxType {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// NumL1Messages returns the number of L1 messages in this chunk.
|
||||
// This number is the sum of included and skipped L1 messages.
|
||||
func (c *Chunk) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
|
||||
var numL1Messages uint64
|
||||
for _, block := range c.Blocks {
|
||||
numL1MessagesInBlock := block.NumL1Messages(totalL1MessagePoppedBefore)
|
||||
numL1Messages += numL1MessagesInBlock
|
||||
totalL1MessagePoppedBefore += numL1MessagesInBlock
|
||||
}
|
||||
// TODO: cache results
|
||||
return numL1Messages
|
||||
}
|
||||
|
||||
// ConvertTxDataToRLPEncoding transforms []*TransactionData into []*types.Transaction.
|
||||
func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
|
||||
data, err := hexutil.Decode(txData.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode txData.Data: data=%v, err=%w", txData.Data, err)
|
||||
}
|
||||
|
||||
var tx *types.Transaction
|
||||
switch txData.Type {
|
||||
case types.LegacyTxType:
|
||||
tx = types.NewTx(&types.LegacyTx{
|
||||
Nonce: txData.Nonce,
|
||||
To: txData.To,
|
||||
Value: txData.Value.ToInt(),
|
||||
Gas: txData.Gas,
|
||||
GasPrice: txData.GasPrice.ToInt(),
|
||||
Data: data,
|
||||
V: txData.V.ToInt(),
|
||||
R: txData.R.ToInt(),
|
||||
S: txData.S.ToInt(),
|
||||
})
|
||||
|
||||
case types.AccessListTxType:
|
||||
tx = types.NewTx(&types.AccessListTx{
|
||||
ChainID: txData.ChainId.ToInt(),
|
||||
Nonce: txData.Nonce,
|
||||
To: txData.To,
|
||||
Value: txData.Value.ToInt(),
|
||||
Gas: txData.Gas,
|
||||
GasPrice: txData.GasPrice.ToInt(),
|
||||
Data: data,
|
||||
AccessList: txData.AccessList,
|
||||
V: txData.V.ToInt(),
|
||||
R: txData.R.ToInt(),
|
||||
S: txData.S.ToInt(),
|
||||
})
|
||||
|
||||
case types.DynamicFeeTxType:
|
||||
tx = types.NewTx(&types.DynamicFeeTx{
|
||||
ChainID: txData.ChainId.ToInt(),
|
||||
Nonce: txData.Nonce,
|
||||
To: txData.To,
|
||||
Value: txData.Value.ToInt(),
|
||||
Gas: txData.Gas,
|
||||
GasTipCap: txData.GasTipCap.ToInt(),
|
||||
GasFeeCap: txData.GasFeeCap.ToInt(),
|
||||
Data: data,
|
||||
AccessList: txData.AccessList,
|
||||
V: txData.V.ToInt(),
|
||||
R: txData.R.ToInt(),
|
||||
S: txData.S.ToInt(),
|
||||
})
|
||||
|
||||
case types.L1MessageTxType: // L1MessageTxType is not supported
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)
|
||||
}
|
||||
|
||||
rlpTxData, err := tx.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal binary of the tx: tx=%v, err=%w", tx, err)
|
||||
}
|
||||
|
||||
return rlpTxData, nil
|
||||
}
|
||||
|
||||
// CrcMax calculates the maximum row consumption of crc.
|
||||
func (c *Chunk) CrcMax() (uint64, error) {
|
||||
// Map sub-circuit name to row count
|
||||
crc := make(map[string]uint64)
|
||||
|
||||
// Iterate over blocks, accumulate row consumption
|
||||
for _, block := range c.Blocks {
|
||||
if block.RowConsumption == nil {
|
||||
return 0, fmt.Errorf("block (%d, %v) has nil RowConsumption", block.Header.Number, block.Header.Hash().Hex())
|
||||
}
|
||||
for _, subCircuit := range *block.RowConsumption {
|
||||
crc[subCircuit.Name] += subCircuit.RowNumber
|
||||
}
|
||||
}
|
||||
|
||||
// Find the maximum row consumption
|
||||
var maxVal uint64
|
||||
for _, value := range crc {
|
||||
if value > maxVal {
|
||||
maxVal = value
|
||||
}
|
||||
}
|
||||
|
||||
// Return the maximum row consumption
|
||||
return maxVal, nil
|
||||
}
|
||||
|
||||
// NumTransactions calculates the total number of transactions in a Chunk.
|
||||
func (c *Chunk) NumTransactions() uint64 {
|
||||
var totalTxNum uint64
|
||||
for _, block := range c.Blocks {
|
||||
totalTxNum += uint64(len(block.Transactions))
|
||||
}
|
||||
return totalTxNum
|
||||
}
|
||||
|
||||
// NumL2Transactions calculates the total number of L2 transactions in a Chunk.
|
||||
func (c *Chunk) NumL2Transactions() uint64 {
|
||||
var totalTxNum uint64
|
||||
for _, block := range c.Blocks {
|
||||
totalTxNum += block.NumL2Transactions()
|
||||
}
|
||||
return totalTxNum
|
||||
}
|
||||
|
||||
// L2GasUsed calculates the total gas of L2 transactions in a Chunk.
|
||||
func (c *Chunk) L2GasUsed() uint64 {
|
||||
var totalTxNum uint64
|
||||
for _, block := range c.Blocks {
|
||||
totalTxNum += block.Header.GasUsed
|
||||
}
|
||||
return totalTxNum
|
||||
}
|
||||
|
||||
// StateRoot gets the state root after committing/finalizing the batch.
|
||||
func (b *Batch) StateRoot() common.Hash {
|
||||
numChunks := len(b.Chunks)
|
||||
if len(b.Chunks) == 0 {
|
||||
return common.Hash{}
|
||||
}
|
||||
lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
|
||||
return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].Header.Root
|
||||
}
|
||||
|
||||
// WithdrawRoot gets the withdraw root after committing/finalizing the batch.
|
||||
func (b *Batch) WithdrawRoot() common.Hash {
|
||||
numChunks := len(b.Chunks)
|
||||
if len(b.Chunks) == 0 {
|
||||
return common.Hash{}
|
||||
}
|
||||
lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
|
||||
return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].WithdrawRoot
|
||||
}
|
||||
121
common/types/encoding/da_test.go
Normal file
121
common/types/encoding/da_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package encoding
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
m.Run()
|
||||
}
|
||||
|
||||
// TestUtilFunctions exercises the Block, Chunk and Batch helper methods
// against the JSON block-trace fixtures in ../../testdata.
func TestUtilFunctions(t *testing.T) {
	block1 := readBlockFromJSON(t, "../../testdata/blockTrace_02.json")
	block2 := readBlockFromJSON(t, "../../testdata/blockTrace_03.json")
	block3 := readBlockFromJSON(t, "../../testdata/blockTrace_04.json")
	block4 := readBlockFromJSON(t, "../../testdata/blockTrace_05.json")
	block5 := readBlockFromJSON(t, "../../testdata/blockTrace_06.json")
	block6 := readBlockFromJSON(t, "../../testdata/blockTrace_07.json")

	chunk1 := &Chunk{Blocks: []*Block{block1, block2}}
	chunk2 := &Chunk{Blocks: []*Block{block3, block4}}
	chunk3 := &Chunk{Blocks: []*Block{block5, block6}}

	batch := &Batch{Chunks: []*Chunk{chunk1, chunk2, chunk3}}

	// Test Block methods: expected L1-message and L2-transaction counts
	// are fixed properties of the fixture traces above.
	assert.Equal(t, uint64(0), block1.NumL1Messages(0))
	assert.Equal(t, uint64(2), block1.NumL2Transactions())
	assert.Equal(t, uint64(0), block2.NumL1Messages(0))
	assert.Equal(t, uint64(1), block2.NumL2Transactions())
	assert.Equal(t, uint64(11), block3.NumL1Messages(0))
	assert.Equal(t, uint64(1), block3.NumL2Transactions())
	assert.Equal(t, uint64(42), block4.NumL1Messages(0))
	assert.Equal(t, uint64(0), block4.NumL2Transactions())
	assert.Equal(t, uint64(10), block5.NumL1Messages(0))
	assert.Equal(t, uint64(0), block5.NumL2Transactions())
	assert.Equal(t, uint64(257), block6.NumL1Messages(0))
	assert.Equal(t, uint64(0), block6.NumL2Transactions())

	// Test Chunk methods: counts, crc maxima and gas aggregate over the
	// chunk's blocks.
	assert.Equal(t, uint64(0), chunk1.NumL1Messages(0))
	assert.Equal(t, uint64(3), chunk1.NumL2Transactions())
	crc1Max, err := chunk1.CrcMax()
	assert.NoError(t, err)
	assert.Equal(t, uint64(11), crc1Max)
	assert.Equal(t, uint64(3), chunk1.NumTransactions())
	assert.Equal(t, uint64(1194994), chunk1.L2GasUsed())

	assert.Equal(t, uint64(42), chunk2.NumL1Messages(0))
	assert.Equal(t, uint64(1), chunk2.NumL2Transactions())
	crc2Max, err := chunk2.CrcMax()
	assert.NoError(t, err)
	assert.Equal(t, uint64(0), crc2Max)
	assert.Equal(t, uint64(7), chunk2.NumTransactions())
	assert.Equal(t, uint64(144000), chunk2.L2GasUsed())

	assert.Equal(t, uint64(257), chunk3.NumL1Messages(0))
	assert.Equal(t, uint64(0), chunk3.NumL2Transactions())
	// Nil out one block's RowConsumption to verify CrcMax reports an error.
	chunk3.Blocks[0].RowConsumption = nil
	crc3Max, err := chunk3.CrcMax()
	assert.Error(t, err)
	assert.EqualError(t, err, "block (17, 0x003fee335455c0c293dda17ea9365fe0caa94071ed7216baf61f7aeb808e8a28) has nil RowConsumption")
	assert.Equal(t, uint64(0), crc3Max)
	assert.Equal(t, uint64(5), chunk3.NumTransactions())
	assert.Equal(t, uint64(240000), chunk3.L2GasUsed())

	// Test Batch methods: both roots come from the last block of the last chunk.
	assert.Equal(t, block6.Header.Root, batch.StateRoot())
	assert.Equal(t, block6.WithdrawRoot, batch.WithdrawRoot())
}
|
||||
|
||||
func TestConvertTxDataToRLPEncoding(t *testing.T) {
|
||||
blocks := []*Block{
|
||||
readBlockFromJSON(t, "../../testdata/blockTrace_02.json"),
|
||||
readBlockFromJSON(t, "../../testdata/blockTrace_03.json"),
|
||||
readBlockFromJSON(t, "../../testdata/blockTrace_04.json"),
|
||||
readBlockFromJSON(t, "../../testdata/blockTrace_05.json"),
|
||||
readBlockFromJSON(t, "../../testdata/blockTrace_06.json"),
|
||||
readBlockFromJSON(t, "../../testdata/blockTrace_07.json"),
|
||||
}
|
||||
|
||||
for _, block := range blocks {
|
||||
for _, txData := range block.Transactions {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
continue
|
||||
}
|
||||
|
||||
rlpTxData, err := ConvertTxDataToRLPEncoding(txData)
|
||||
assert.NoError(t, err)
|
||||
var tx types.Transaction
|
||||
err = tx.UnmarshalBinary(rlpTxData)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, txData.TxHash, tx.Hash().Hex())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyBatchRoots(t *testing.T) {
|
||||
emptyBatch := &Batch{Chunks: []*Chunk{}}
|
||||
assert.Equal(t, common.Hash{}, emptyBatch.StateRoot())
|
||||
assert.Equal(t, common.Hash{}, emptyBatch.WithdrawRoot())
|
||||
}
|
||||
|
||||
func readBlockFromJSON(t *testing.T, filename string) *Block {
|
||||
data, err := os.ReadFile(filename)
|
||||
assert.NoError(t, err)
|
||||
|
||||
block := &Block{}
|
||||
assert.NoError(t, json.Unmarshal(data, block))
|
||||
return block
|
||||
}
|
||||
@@ -16,6 +16,7 @@ var (
|
||||
&MetricsAddr,
|
||||
&MetricsPort,
|
||||
&ServicePortFlag,
|
||||
&Genesis,
|
||||
}
|
||||
// RollupRelayerFlags contains flags only used in rollup-relayer
|
||||
RollupRelayerFlags = []cli.Flag{
|
||||
@@ -83,4 +84,10 @@ var (
|
||||
Usage: "Port that the service will listen on",
|
||||
Value: 8080,
|
||||
}
|
||||
// Genesis is the genesis file
|
||||
Genesis = cli.StringFlag{
|
||||
Name: "genesis",
|
||||
Usage: "Genesis file of the network",
|
||||
Value: "./conf/genesis.json",
|
||||
}
|
||||
)
|
||||
|
||||
@@ -3,11 +3,16 @@ package utils
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/modern-go/reflect2"
|
||||
"github.com/scroll-tech/go-ethereum/core"
|
||||
)
|
||||
|
||||
// TryTimes try run several times until the function return true.
|
||||
@@ -59,3 +64,17 @@ func RandomURL() string {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(5000-1))
|
||||
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
|
||||
}
|
||||
|
||||
// ReadGenesis parses and returns the genesis file at the given path
func ReadGenesis(genesisPath string) (*core.Genesis, error) {
	// Clean the path before opening to normalize any "../" segments in the
	// configured value.
	file, err := os.Open(filepath.Clean(genesisPath))
	if err != nil {
		return nil, err
	}

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		// On decode failure, close the file and surface both the decode
		// error and any close error together.
		return nil, errors.Join(err, file.Close())
	}
	// Return the parsed genesis; a failure to close the file is still
	// reported to the caller via the error value.
	return genesis, file.Close()
}
|
||||
|
||||
@@ -2,6 +2,9 @@ package version
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
// CheckScrollProverVersion check the "scroll-prover" version, if it's different from the local one, return false
|
||||
@@ -19,3 +22,20 @@ func CheckScrollProverVersion(proverVersion string) bool {
|
||||
// compare the `scroll_prover` version
|
||||
return remote[2] == local[2]
|
||||
}
|
||||
|
||||
// CheckScrollRepoVersion checks if the proverVersion is at least the minimum required version.
//
// Returns false (after logging) when either version string cannot be parsed.
func CheckScrollRepoVersion(proverVersion, minVersion string) bool {
	// Append "-0" to the constraint so pre-release versions of minVersion are
	// also admitted (semver constraints exclude pre-releases by default).
	c, err := semver.NewConstraint(">= " + minVersion + "-0")
	if err != nil {
		log.Error("failed to initialize constraint", "minVersion", minVersion, "error", err)
		return false
	}

	// Append "-z" before parsing the prover version; presumably this makes a
	// bare release (e.g. "v1.0.0") compare as a late pre-release so it still
	// satisfies the ">= ...-0" constraint above — TODO confirm intent.
	v, err := semver.NewVersion(proverVersion + "-z")
	if err != nil {
		log.Error("failed to parse version", "proverVersion", proverVersion, "error", err)
		return false
	}

	return c.Check(v)
}
|
||||
|
||||
59
common/version/prover_version_test.go
Normal file
59
common/version/prover_version_test.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package version
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func TestCheckScrollProverVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
proverVersion string
|
||||
want bool
|
||||
}{
|
||||
{Version, true},
|
||||
{"tag-commit-111111-000000", false},
|
||||
{"incorrect-format", false},
|
||||
{"tag-commit-222222-111111", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if got := CheckScrollProverVersion(tt.proverVersion); got != tt.want {
|
||||
t.Errorf("CheckScrollProverVersion(%q) = %v, want %v", tt.proverVersion, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckScrollRepoVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
proverVersion string
|
||||
minVersion string
|
||||
want bool
|
||||
}{
|
||||
{"v1.2.3-commit-111111-000000", "v1.2.3-alpha", true},
|
||||
{"v1.2.3-patch-commit-111111-000000", "v1.2.2", true},
|
||||
{"v1.0.0", "v1.0.0-alpha", true},
|
||||
{"v1.2.2", "v1.2.3", false},
|
||||
{"v2.0.0", "v1.9.9", true},
|
||||
{"v0.9.0", "v1.0.0", false},
|
||||
{"v9.9.9", "v10.0.0-alpha", false},
|
||||
{"v4.1.98-aaa-bbb-ccc", "v999.0.0", false},
|
||||
{"v1.0.0", "v1.0.0", true},
|
||||
{"v1.0.0-alpha", "v1.0.0-alpha", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if got := CheckScrollRepoVersion(tt.proverVersion, tt.minVersion); got != tt.want {
|
||||
t.Errorf("CheckScrollRepoVersion(%q, %q) = %v, want %v", tt.proverVersion, tt.minVersion, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.3.52"
|
||||
var tag = "v4.3.78"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
1
contracts/.nvmrc
Normal file
1
contracts/.nvmrc
Normal file
@@ -0,0 +1 @@
|
||||
v18.15.0
|
||||
10
contracts/.solcover.js
Normal file
10
contracts/.solcover.js
Normal file
@@ -0,0 +1,10 @@
|
||||
// solidity-coverage configuration for the contracts package.
module.exports = {
  // Contracts excluded from coverage instrumentation.
  skipFiles: [
    'mocks',
    'test',
    'L2/predeploys/L1BlockContainer.sol',
    'libraries/verifier/ZkTrieVerifier.sol',
    'libraries/verifier/PatriciaMerkleTrieVerifier.sol'
  ],
  // Emit both lcov and json coverage reports.
  istanbulReporter: ["lcov", "json"]
};
|
||||
2
contracts/circomlib.d.ts
vendored
2
contracts/circomlib.d.ts
vendored
@@ -1 +1,3 @@
|
||||
declare module "circomlib/src/evmasm";
|
||||
declare module "circomlib/src/poseidon_gencontract";
|
||||
declare module "circomlib/src/poseidon_constants";
|
||||
|
||||
@@ -162,7 +162,7 @@ Initialize the storage of L1ERC1155Gateway.
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _counterpart | address | The address of L2ERC1155Gateway in L2. |
|
||||
| _messenger | address | The address of L1ScrollMessenger. |
|
||||
| _messenger | address | The address of L1ScrollMessenger in L1. |
|
||||
|
||||
### messenger
|
||||
|
||||
@@ -389,12 +389,12 @@ Emitted when the ERC1155 NFT is batch deposited to gateway on layer 1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _from `indexed` | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _tokenIds | uint256[] | undefined |
|
||||
| _amounts | uint256[] | undefined |
|
||||
| _l1Token `indexed` | address | The address of ERC1155 NFT on layer 1. |
|
||||
| _l2Token `indexed` | address | The address of ERC1155 NFT on layer 2. |
|
||||
| _from `indexed` | address | The address of sender on layer 1. |
|
||||
| _to | address | The address of recipient on layer 2. |
|
||||
| _tokenIds | uint256[] | The list of token ids of the ERC1155 NFT to deposit on layer 1. |
|
||||
| _amounts | uint256[] | The list of corresponding number of token to deposit on layer 1. |
|
||||
|
||||
### BatchRefundERC1155
|
||||
|
||||
@@ -410,10 +410,10 @@ Emitted when some ERC1155 token is refunded.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| recipient `indexed` | address | undefined |
|
||||
| tokenIds | uint256[] | undefined |
|
||||
| amounts | uint256[] | undefined |
|
||||
| token `indexed` | address | The address of the token in L1. |
|
||||
| recipient `indexed` | address | The address of receiver in L1. |
|
||||
| tokenIds | uint256[] | The list of ids of token refunded. |
|
||||
| amounts | uint256[] | The list of amount of token refunded. |
|
||||
|
||||
### DepositERC1155
|
||||
|
||||
@@ -429,12 +429,12 @@ Emitted when the ERC1155 NFT is deposited to gateway on layer 1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _from `indexed` | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _tokenId | uint256 | undefined |
|
||||
| _amount | uint256 | undefined |
|
||||
| _l1Token `indexed` | address | The address of ERC1155 NFT on layer 1. |
|
||||
| _l2Token `indexed` | address | The address of ERC1155 NFT on layer 2. |
|
||||
| _from `indexed` | address | The address of sender on layer 1. |
|
||||
| _to | address | The address of recipient on layer 2. |
|
||||
| _tokenId | uint256 | The token id of the ERC1155 NFT to deposit on layer 1. |
|
||||
| _amount | uint256 | The number of token to deposit on layer 1. |
|
||||
|
||||
### FinalizeBatchWithdrawERC1155
|
||||
|
||||
@@ -442,7 +442,7 @@ Emitted when the ERC1155 NFT is deposited to gateway on layer 1.
|
||||
event FinalizeBatchWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds, uint256[] _amounts)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
|
||||
Emitted when the ERC1155 NFT is batch transferred to recipient on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -450,12 +450,12 @@ Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _from `indexed` | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _tokenIds | uint256[] | undefined |
|
||||
| _amounts | uint256[] | undefined |
|
||||
| _l1Token `indexed` | address | The address of ERC1155 NFT on layer 1. |
|
||||
| _l2Token `indexed` | address | The address of ERC1155 NFT on layer 2. |
|
||||
| _from `indexed` | address | The address of sender on layer 2. |
|
||||
| _to | address | The address of recipient on layer 1. |
|
||||
| _tokenIds | uint256[] | The list of token ids of the ERC1155 NFT to withdraw from layer 2. |
|
||||
| _amounts | uint256[] | The list of corresponding number of token to withdraw from layer 2. |
|
||||
|
||||
### FinalizeWithdrawERC1155
|
||||
|
||||
@@ -463,7 +463,7 @@ Emitted when the ERC1155 NFT is batch transfered to recipient on layer 1.
|
||||
event FinalizeWithdrawERC1155(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId, uint256 _amount)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is transfered to recipient on layer 1.
|
||||
Emitted when the ERC1155 NFT is transferred to recipient on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -471,12 +471,12 @@ Emitted when the ERC1155 NFT is transfered to recipient on layer 1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _from `indexed` | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _tokenId | uint256 | undefined |
|
||||
| _amount | uint256 | undefined |
|
||||
| _l1Token `indexed` | address | The address of ERC1155 NFT on layer 1. |
|
||||
| _l2Token `indexed` | address | The address of ERC1155 NFT on layer 2. |
|
||||
| _from `indexed` | address | The address of sender on layer 2. |
|
||||
| _to | address | The address of recipient on layer 1. |
|
||||
| _tokenId | uint256 | The token id of the ERC1155 NFT to withdraw from layer 2. |
|
||||
| _amount | uint256 | The number of token to withdraw from layer 2. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -486,7 +486,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -525,10 +525,10 @@ Emitted when some ERC1155 token is refunded.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| recipient `indexed` | address | undefined |
|
||||
| tokenId | uint256 | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| token `indexed` | address | The address of the token in L1. |
|
||||
| recipient `indexed` | address | The address of receiver in L1. |
|
||||
| tokenId | uint256 | The id of token refunded. |
|
||||
| amount | uint256 | The amount of token refunded. |
|
||||
|
||||
### UpdateTokenMapping
|
||||
|
||||
|
||||
@@ -156,7 +156,7 @@ Initialize the storage of L1ERC721Gateway.
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _counterpart | address | The address of L2ERC721Gateway in L2. |
|
||||
| _messenger | address | The address of L1ScrollMessenger. |
|
||||
| _messenger | address | The address of L1ScrollMessenger in L1. |
|
||||
|
||||
### messenger
|
||||
|
||||
@@ -334,11 +334,11 @@ Emitted when the ERC721 NFT is batch deposited to gateway on layer 1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _from `indexed` | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _tokenIds | uint256[] | undefined |
|
||||
| _l1Token `indexed` | address | The address of ERC721 NFT on layer 1. |
|
||||
| _l2Token `indexed` | address | The address of ERC721 NFT on layer 2. |
|
||||
| _from `indexed` | address | The address of sender on layer 1. |
|
||||
| _to | address | The address of recipient on layer 2. |
|
||||
| _tokenIds | uint256[] | The list of token ids of the ERC721 NFT to deposit on layer 1. |
|
||||
|
||||
### BatchRefundERC721
|
||||
|
||||
@@ -354,9 +354,9 @@ Emitted when a batch of ERC721 tokens are refunded.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| recipient `indexed` | address | undefined |
|
||||
| tokenIds | uint256[] | undefined |
|
||||
| token `indexed` | address | The address of the token in L1. |
|
||||
| recipient `indexed` | address | The address of receiver in L1. |
|
||||
| tokenIds | uint256[] | The list of token ids of the ERC721 NFT refunded. |
|
||||
|
||||
### DepositERC721
|
||||
|
||||
@@ -372,11 +372,11 @@ Emitted when the ERC721 NFT is deposited to gateway on layer 1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _from `indexed` | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _tokenId | uint256 | undefined |
|
||||
| _l1Token `indexed` | address | The address of ERC721 NFT on layer 1. |
|
||||
| _l2Token `indexed` | address | The address of ERC721 NFT on layer 2. |
|
||||
| _from `indexed` | address | The address of sender on layer 1. |
|
||||
| _to | address | The address of recipient on layer 2. |
|
||||
| _tokenId | uint256 | The token id of the ERC721 NFT to deposit on layer 1. |
|
||||
|
||||
### FinalizeBatchWithdrawERC721
|
||||
|
||||
@@ -384,7 +384,7 @@ Emitted when the ERC721 NFT is deposited to gateway on layer 1.
|
||||
event FinalizeBatchWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256[] _tokenIds)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
|
||||
Emitted when the ERC721 NFT is batch transferred to recipient on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -392,11 +392,11 @@ Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _from `indexed` | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _tokenIds | uint256[] | undefined |
|
||||
| _l1Token `indexed` | address | The address of ERC721 NFT on layer 1. |
|
||||
| _l2Token `indexed` | address | The address of ERC721 NFT on layer 2. |
|
||||
| _from `indexed` | address | The address of sender on layer 2. |
|
||||
| _to | address | The address of recipient on layer 1. |
|
||||
| _tokenIds | uint256[] | The list of token ids of the ERC721 NFT to withdraw from layer 2. |
|
||||
|
||||
### FinalizeWithdrawERC721
|
||||
|
||||
@@ -404,7 +404,7 @@ Emitted when the ERC721 NFT is batch transfered to recipient on layer 1.
|
||||
event FinalizeWithdrawERC721(address indexed _l1Token, address indexed _l2Token, address indexed _from, address _to, uint256 _tokenId)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is transfered to recipient on layer 1.
|
||||
Emitted when the ERC721 NFT is transferred to recipient on layer 1.
|
||||
|
||||
|
||||
|
||||
@@ -412,11 +412,11 @@ Emitted when the ERC721 NFT is transfered to recipient on layer 1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _from `indexed` | address | undefined |
|
||||
| _to | address | undefined |
|
||||
| _tokenId | uint256 | undefined |
|
||||
| _l1Token `indexed` | address | The address of ERC721 NFT on layer 1. |
|
||||
| _l2Token `indexed` | address | The address of ERC721 NFT on layer 2. |
|
||||
| _from `indexed` | address | The address of sender on layer 2. |
|
||||
| _to | address | The address of recipient on layer 1. |
|
||||
| _tokenId | uint256 | The token id of the ERC721 NFT to withdraw from layer 2. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -426,7 +426,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -465,9 +465,9 @@ Emitted when some ERC721 token is refunded.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| recipient `indexed` | address | undefined |
|
||||
| tokenId | uint256 | undefined |
|
||||
| token `indexed` | address | The address of the token in L1. |
|
||||
| recipient `indexed` | address | The address of receiver in L1. |
|
||||
| tokenId | uint256 | The id of token refunded. |
|
||||
|
||||
### UpdateTokenMapping
|
||||
|
||||
|
||||
@@ -168,7 +168,7 @@ function ethGateway() external view returns (address)
|
||||
|
||||
The address of L1ETHGateway.
|
||||
|
||||
*This variable is no longer used.*
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
@@ -286,7 +286,7 @@ function initialize(address _ethGateway, address _defaultERC20Gateway) external
|
||||
|
||||
Initialize the storage of L1GatewayRouter.
|
||||
|
||||
*The parameter `_ethGateway` is no longer used.*
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -295,23 +295,6 @@ Initialize the storage of L1GatewayRouter.
|
||||
| _ethGateway | address | The address of L1ETHGateway contract. |
|
||||
| _defaultERC20Gateway | address | The address of default ERC20 Gateway contract. |
|
||||
|
||||
### messenger
|
||||
|
||||
```solidity
|
||||
function messenger() external view returns (address)
|
||||
```
|
||||
|
||||
The address of `L1ScrollMessenger`.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### owner
|
||||
|
||||
```solidity
|
||||
@@ -447,12 +430,12 @@ Emitted when someone deposit ERC20 token from L1 to L2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L1. |
|
||||
| to | address | The address of recipient in L2. |
|
||||
| amount | uint256 | The amount of token will be deposited from L1 to L2. |
|
||||
| data | bytes | The optional calldata passed to recipient in L2. |
|
||||
|
||||
### DepositETH
|
||||
|
||||
@@ -468,10 +451,10 @@ Emitted when someone deposit ETH from L1 to L2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| from `indexed` | address | undefined |
|
||||
| to `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| from `indexed` | address | The address of sender in L1. |
|
||||
| to `indexed` | address | The address of recipient in L2. |
|
||||
| amount | uint256 | The amount of ETH will be deposited from L1 to L2. |
|
||||
| data | bytes | The optional calldata passed to recipient in L2. |
|
||||
|
||||
### FinalizeWithdrawERC20
|
||||
|
||||
@@ -487,12 +470,12 @@ Emitted when ERC20 token is withdrawn from L2 to L1 and transfer to recipient.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L2. |
|
||||
| to | address | The address of recipient in L1. |
|
||||
| amount | uint256 | The amount of token withdrawn from L2 to L1. |
|
||||
| data | bytes | The optional calldata passed to recipient in L1. |
|
||||
|
||||
### FinalizeWithdrawETH
|
||||
|
||||
@@ -508,10 +491,10 @@ Emitted when ETH is withdrawn from L2 to L1 and transfer to recipient.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| from `indexed` | address | undefined |
|
||||
| to `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| from `indexed` | address | The address of sender in L2. |
|
||||
| to `indexed` | address | The address of recipient in L1. |
|
||||
| amount | uint256 | The amount of ETH withdrawn from L2 to L1. |
|
||||
| data | bytes | The optional calldata passed to recipient in L1. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -521,7 +504,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -560,9 +543,9 @@ Emitted when some ERC20 token is refunded.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| recipient `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| token `indexed` | address | The address of the token in L1. |
|
||||
| recipient `indexed` | address | The address of receiver in L1. |
|
||||
| amount | uint256 | The amount of token refunded to receiver. |
|
||||
|
||||
### RefundETH
|
||||
|
||||
@@ -578,8 +561,8 @@ Emitted when some ETH is refunded.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| recipient `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| recipient `indexed` | address | The address of receiver in L1. |
|
||||
| amount | uint256 | The amount of ETH refunded to receiver. |
|
||||
|
||||
### SetDefaultERC20Gateway
|
||||
|
||||
@@ -595,8 +578,8 @@ Emitted when the address of default ERC20 Gateway is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| oldDefaultERC20Gateway `indexed` | address | undefined |
|
||||
| newDefaultERC20Gateway `indexed` | address | undefined |
|
||||
| oldDefaultERC20Gateway `indexed` | address | The address of the old default ERC20 Gateway. |
|
||||
| newDefaultERC20Gateway `indexed` | address | The address of the new default ERC20 Gateway. |
|
||||
|
||||
### SetERC20Gateway
|
||||
|
||||
@@ -612,9 +595,9 @@ Emitted when the `gateway` for `token` is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| oldGateway `indexed` | address | undefined |
|
||||
| newGateway `indexed` | address | undefined |
|
||||
| token `indexed` | address | The address of token updated. |
|
||||
| oldGateway `indexed` | address | The corresponding address of the old gateway. |
|
||||
| newGateway `indexed` | address | The corresponding address of the new gateway. |
|
||||
|
||||
### SetETHGateway
|
||||
|
||||
@@ -630,22 +613,8 @@ Emitted when the address of ETH Gateway is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| oldETHGateway `indexed` | address | undefined |
|
||||
| newEthGateway `indexed` | address | undefined |
|
||||
|
||||
|
||||
|
||||
## Errors
|
||||
|
||||
### ErrorZeroAddress
|
||||
|
||||
```solidity
|
||||
error ErrorZeroAddress()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the given address is `address(0)`.*
|
||||
| oldETHGateway `indexed` | address | The address of the old ETH Gateway. |
|
||||
| newEthGateway `indexed` | address | The address of the new ETH Gateway. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -471,7 +471,7 @@ Emitted when a cross domain message is failed to relay.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| messageHash `indexed` | bytes32 | undefined |
|
||||
| messageHash `indexed` | bytes32 | The hash of the message. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -481,7 +481,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -514,7 +514,7 @@ event Paused(address account)
|
||||
|
||||
|
||||
|
||||
|
||||
*Emitted when the pause is triggered by `account`.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -536,7 +536,7 @@ Emitted when a cross domain message is relayed successfully.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| messageHash `indexed` | bytes32 | undefined |
|
||||
| messageHash `indexed` | bytes32 | The hash of the message. |
|
||||
|
||||
### SentMessage
|
||||
|
||||
@@ -552,12 +552,12 @@ Emitted when a cross domain message is sent.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| sender `indexed` | address | undefined |
|
||||
| target `indexed` | address | undefined |
|
||||
| value | uint256 | undefined |
|
||||
| messageNonce | uint256 | undefined |
|
||||
| gasLimit | uint256 | undefined |
|
||||
| message | bytes | undefined |
|
||||
| sender `indexed` | address | The address of the sender who initiates the message. |
|
||||
| target `indexed` | address | The address of target contract to call. |
|
||||
| value | uint256 | The amount of value passed to the target contract. |
|
||||
| messageNonce | uint256 | The nonce of the message. |
|
||||
| gasLimit | uint256 | The optional gas limit passed to L1 or L2. |
|
||||
| message | bytes | The calldata passed to the target contract. |
|
||||
|
||||
### Unpaused
|
||||
|
||||
@@ -567,7 +567,7 @@ event Unpaused(address account)
|
||||
|
||||
|
||||
|
||||
|
||||
*Emitted when the pause is lifted by `account`.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -589,8 +589,8 @@ Emitted when owner updates fee vault contract.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _oldFeeVault | address | undefined |
|
||||
| _newFeeVault | address | undefined |
|
||||
| _oldFeeVault | address | The address of old fee vault contract. |
|
||||
| _newFeeVault | address | The address of new fee vault contract. |
|
||||
|
||||
### UpdateMaxReplayTimes
|
||||
|
||||
@@ -606,8 +606,8 @@ Emitted when the maximum number of times each message can be replayed is updated
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| oldMaxReplayTimes | uint256 | undefined |
|
||||
| newMaxReplayTimes | uint256 | undefined |
|
||||
| oldMaxReplayTimes | uint256 | The old maximum number of times each message can be replayed. |
|
||||
| newMaxReplayTimes | uint256 | The new maximum number of times each message can be replayed. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -130,7 +130,7 @@ Return the corresponding l2 token address given l1 token address.
|
||||
### initialize
|
||||
|
||||
```solidity
|
||||
function initialize(address _counterpart, address _router, address _messenger, address _l2TokenImplementation, address _l2TokenFactory) external nonpayable
|
||||
function initialize(address _counterpart, address _router, address _messenger, address, address) external nonpayable
|
||||
```
|
||||
|
||||
Initialize the storage of L1StandardERC20Gateway.
|
||||
@@ -142,10 +142,10 @@ Initialize the storage of L1StandardERC20Gateway.
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _counterpart | address | The address of L2StandardERC20Gateway in L2. |
|
||||
| _router | address | The address of L1GatewayRouter. |
|
||||
| _messenger | address | The address of L1ScrollMessenger. |
|
||||
| _l2TokenImplementation | address | The address of ScrollStandardERC20 implementation in L2. |
|
||||
| _l2TokenFactory | address | The address of ScrollStandardERC20Factory contract in L2. |
|
||||
| _router | address | The address of L1GatewayRouter in L1. |
|
||||
| _messenger | address | The address of L1ScrollMessenger in L1. |
|
||||
| _3 | address | undefined |
|
||||
| _4 | address | undefined |
|
||||
|
||||
### l2TokenFactory
|
||||
|
||||
@@ -293,12 +293,12 @@ Emitted when someone deposit ERC20 token from L1 to L2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L1. |
|
||||
| to | address | The address of recipient in L2. |
|
||||
| amount | uint256 | The amount of token will be deposited from L1 to L2. |
|
||||
| data | bytes | The optional calldata passed to recipient in L2. |
|
||||
|
||||
### FinalizeWithdrawERC20
|
||||
|
||||
@@ -314,12 +314,12 @@ Emitted when ERC20 token is withdrawn from L2 to L1 and transfer to recipient.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L2. |
|
||||
| to | address | The address of recipient in L1. |
|
||||
| amount | uint256 | The amount of token withdrawn from L2 to L1. |
|
||||
| data | bytes | The optional calldata passed to recipient in L1. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -329,7 +329,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -368,9 +368,9 @@ Emitted when some ERC20 token is refunded.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| recipient `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| token `indexed` | address | The address of the token in L1. |
|
||||
| recipient `indexed` | address | The address of receiver in L1. |
|
||||
| amount | uint256 | The amount of token refunded to receiver. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
The `L1WETHGateway` contract is used to deposit `WETH` token on layer 1 and finalize withdraw `WETH` from layer 2.
|
||||
|
||||
*The deposited WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract. On finalizing withdraw, the Ether will be transfered from `L1ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
|
||||
*The deposited WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L1ScrollMessenger` contract. On finalizing withdraw, the Ether will be transferred from `L1ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
|
||||
|
||||
## Methods
|
||||
|
||||
@@ -152,15 +152,15 @@ function initialize(address _counterpart, address _router, address _messenger) e
|
||||
|
||||
Initialize the storage of L1WETHGateway.
|
||||
|
||||
|
||||
*The parameters `_counterpart`, `_router` and `_messenger` are no longer used.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _counterpart | address | The address of L2ETHGateway in L2. |
|
||||
| _router | address | The address of L1GatewayRouter. |
|
||||
| _messenger | address | The address of L1ScrollMessenger. |
|
||||
| _router | address | The address of L1GatewayRouter in L1. |
|
||||
| _messenger | address | The address of L1ScrollMessenger in L1. |
|
||||
|
||||
### l2WETH
|
||||
|
||||
@@ -291,12 +291,12 @@ Emitted when someone deposit ERC20 token from L1 to L2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L1. |
|
||||
| to | address | The address of recipient in L2. |
|
||||
| amount | uint256 | The amount of token will be deposited from L1 to L2. |
|
||||
| data | bytes | The optional calldata passed to recipient in L2. |
|
||||
|
||||
### FinalizeWithdrawERC20
|
||||
|
||||
@@ -312,12 +312,12 @@ Emitted when ERC20 token is withdrawn from L2 to L1 and transfer to recipient.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L2. |
|
||||
| to | address | The address of recipient in L1. |
|
||||
| amount | uint256 | The amount of token withdrawn from L2 to L1. |
|
||||
| data | bytes | The optional calldata passed to recipient in L1. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -327,7 +327,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -366,9 +366,9 @@ Emitted when some ERC20 token is refunded.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| recipient `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| token `indexed` | address | The address of the token in L1. |
|
||||
| recipient `indexed` | address | The address of receiver in L1. |
|
||||
| amount | uint256 | The amount of token refunded to receiver. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
The `L2ERC1155Gateway` is used to withdraw ERC1155 compatible NFTs on layer 2 and finalize deposit the NFTs from layer 1.
|
||||
|
||||
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transfered to the recipient. This will be changed if we have more specific scenarios.*
|
||||
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transferred to the recipient. This will be changed if we have more specific scenarios.*
|
||||
|
||||
## Methods
|
||||
|
||||
@@ -365,7 +365,7 @@ Withdraw some ERC1155 NFT to caller's account on layer 1.
|
||||
event BatchWithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
|
||||
Emitted when the ERC1155 NFT is batch transferred to gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -373,12 +373,12 @@ Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| tokenIds | uint256[] | undefined |
|
||||
| amounts | uint256[] | undefined |
|
||||
| l1Token `indexed` | address | The address of ERC1155 NFT on layer 1. |
|
||||
| l2Token `indexed` | address | The address of ERC1155 NFT on layer 2. |
|
||||
| from `indexed` | address | The address of sender on layer 2. |
|
||||
| to | address | The address of recipient on layer 1. |
|
||||
| tokenIds | uint256[] | The list of token ids of the ERC1155 NFT to withdraw on layer 2. |
|
||||
| amounts | uint256[] | The list of corresponding amounts to withdraw. |
|
||||
|
||||
### FinalizeBatchDepositERC1155
|
||||
|
||||
@@ -386,7 +386,7 @@ Emitted when the ERC1155 NFT is batch transfered to gateway on layer 2.
|
||||
event FinalizeBatchDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds, uint256[] amounts)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
|
||||
Emitted when the ERC1155 NFT is batch transferred to recipient on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -394,12 +394,12 @@ Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| tokenIds | uint256[] | undefined |
|
||||
| amounts | uint256[] | undefined |
|
||||
| l1Token `indexed` | address | The address of ERC1155 NFT on layer 1. |
|
||||
| l2Token `indexed` | address | The address of ERC1155 NFT on layer 2. |
|
||||
| from `indexed` | address | The address of sender on layer 1. |
|
||||
| to | address | The address of recipient on layer 2. |
|
||||
| tokenIds | uint256[] | The list of token ids of the ERC1155 NFT deposited on layer 1. |
|
||||
| amounts | uint256[] | The list of corresponding amounts deposited. |
|
||||
|
||||
### FinalizeDepositERC1155
|
||||
|
||||
@@ -407,7 +407,7 @@ Emitted when the ERC1155 NFT is batch transfered to recipient on layer 2.
|
||||
event FinalizeDepositERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is transfered to recipient on layer 2.
|
||||
Emitted when the ERC1155 NFT is transferred to recipient on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -415,12 +415,12 @@ Emitted when the ERC1155 NFT is transfered to recipient on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| tokenId | uint256 | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| l1Token `indexed` | address | The address of ERC1155 NFT on layer 1. |
|
||||
| l2Token `indexed` | address | The address of ERC1155 NFT on layer 2. |
|
||||
| from `indexed` | address | The address of sender on layer 1. |
|
||||
| to | address | The address of recipient on layer 2. |
|
||||
| tokenId | uint256 | The token id of the ERC1155 NFT deposited on layer 1. |
|
||||
| amount | uint256 | The amount of token deposited. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -430,7 +430,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -479,7 +479,7 @@ Emitted when token mapping for ERC1155 token is updated.
|
||||
event WithdrawERC1155(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId, uint256 amount)
|
||||
```
|
||||
|
||||
Emitted when the ERC1155 NFT is transfered to gateway on layer 2.
|
||||
Emitted when the ERC1155 NFT is transferred to gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -487,12 +487,12 @@ Emitted when the ERC1155 NFT is transfered to gateway on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| tokenId | uint256 | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| l1Token `indexed` | address | The address of ERC1155 NFT on layer 1. |
|
||||
| l2Token `indexed` | address | The address of ERC1155 NFT on layer 2. |
|
||||
| from `indexed` | address | The address of sender on layer 2. |
|
||||
| to | address | The address of recipient on layer 1. |
|
||||
| tokenId | uint256 | The token id of the ERC1155 NFT to withdraw on layer 2. |
|
||||
| amount | uint256 | The amount of token to withdraw. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
The `L2ERC721Gateway` is used to withdraw ERC721 compatible NFTs on layer 2 and finalize deposit the NFTs from layer 1.
|
||||
|
||||
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transfered to the recipient. This will be changed if we have more specific scenarios.*
|
||||
*The withdrawn NFTs tokens will be burned directly. On finalizing deposit, the corresponding NFT will be minted and transferred to the recipient. This will be changed if we have more specific scenarios.*
|
||||
|
||||
## Methods
|
||||
|
||||
@@ -310,7 +310,7 @@ Withdraw some ERC721 NFT to caller's account on layer 1.
|
||||
event BatchWithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
|
||||
Emitted when the ERC721 NFT is batch transferred to gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -318,11 +318,11 @@ Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| tokenIds | uint256[] | undefined |
|
||||
| l1Token `indexed` | address | The address of ERC721 NFT on layer 1. |
|
||||
| l2Token `indexed` | address | The address of ERC721 NFT on layer 2. |
|
||||
| from `indexed` | address | The address of sender on layer 2. |
|
||||
| to | address | The address of recipient on layer 1. |
|
||||
| tokenIds | uint256[] | The list of token ids of the ERC721 NFT to withdraw on layer 2. |
|
||||
|
||||
### FinalizeBatchDepositERC721
|
||||
|
||||
@@ -330,7 +330,7 @@ Emitted when the ERC721 NFT is batch transfered to gateway on layer 2.
|
||||
event FinalizeBatchDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256[] tokenIds)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
|
||||
Emitted when the ERC721 NFT is batch transferred to recipient on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -338,11 +338,11 @@ Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| tokenIds | uint256[] | undefined |
|
||||
| l1Token `indexed` | address | The address of ERC721 NFT on layer 1. |
|
||||
| l2Token `indexed` | address | The address of ERC721 NFT on layer 2. |
|
||||
| from `indexed` | address | The address of sender on layer 1. |
|
||||
| to | address | The address of recipient on layer 2. |
|
||||
| tokenIds | uint256[] | The list of token ids of the ERC721 NFT deposited on layer 1. |
|
||||
|
||||
### FinalizeDepositERC721
|
||||
|
||||
@@ -350,7 +350,7 @@ Emitted when the ERC721 NFT is batch transfered to recipient on layer 2.
|
||||
event FinalizeDepositERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is transfered to recipient on layer 2.
|
||||
Emitted when the ERC721 NFT is transferred to recipient on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -358,11 +358,11 @@ Emitted when the ERC721 NFT is transfered to recipient on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| tokenId | uint256 | undefined |
|
||||
| l1Token `indexed` | address | The address of ERC721 NFT on layer 1. |
|
||||
| l2Token `indexed` | address | The address of ERC721 NFT on layer 2. |
|
||||
| from `indexed` | address | The address of sender on layer 1. |
|
||||
| to | address | The address of recipient on layer 2. |
|
||||
| tokenId | uint256 | The token id of the ERC721 NFT deposited on layer 1. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -372,7 +372,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -421,7 +421,7 @@ Emitted when token mapping for ERC721 token is updated.
|
||||
event WithdrawERC721(address indexed l1Token, address indexed l2Token, address indexed from, address to, uint256 tokenId)
|
||||
```
|
||||
|
||||
Emitted when the ERC721 NFT is transfered to gateway on layer 2.
|
||||
Emitted when the ERC721 NFT is transferred to gateway on layer 2.
|
||||
|
||||
|
||||
|
||||
@@ -429,11 +429,11 @@ Emitted when the ERC721 NFT is transfered to gateway on layer 2.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| tokenId | uint256 | undefined |
|
||||
| l1Token `indexed` | address | The address of ERC721 NFT on layer 1. |
|
||||
| l2Token `indexed` | address | The address of ERC721 NFT on layer 2. |
|
||||
| from `indexed` | address | The address of sender on layer 2. |
|
||||
| to | address | The address of recipient on layer 1. |
|
||||
| tokenId | uint256 | The token id of the ERC721 NFT to withdraw on layer 2. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -189,23 +189,6 @@ function initialize(address _ethGateway, address _defaultERC20Gateway) external
|
||||
| _ethGateway | address | undefined |
|
||||
| _defaultERC20Gateway | address | undefined |
|
||||
|
||||
### messenger
|
||||
|
||||
```solidity
|
||||
function messenger() external view returns (address)
|
||||
```
|
||||
|
||||
The address of `L2ScrollMessenger`.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### owner
|
||||
|
||||
```solidity
|
||||
@@ -428,12 +411,12 @@ Emitted when ERC20 token is deposited from L1 to L2 and transfer to recipient.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L1. |
|
||||
| to | address | The address of recipient in L2. |
|
||||
| amount | uint256 | The amount of token withdrawn from L1 to L2. |
|
||||
| data | bytes | The optional calldata passed to recipient in L2. |
|
||||
|
||||
### FinalizeDepositETH
|
||||
|
||||
@@ -449,10 +432,10 @@ Emitted when ETH is deposited from L1 to L2 and transfer to recipient.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| from `indexed` | address | undefined |
|
||||
| to `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| from `indexed` | address | The address of sender in L1. |
|
||||
| to `indexed` | address | The address of recipient in L2. |
|
||||
| amount | uint256 | The amount of ETH deposited from L1 to L2. |
|
||||
| data | bytes | The optional calldata passed to recipient in L2. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -462,7 +445,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -501,8 +484,8 @@ Emitted when the address of default ERC20 Gateway is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| oldDefaultERC20Gateway `indexed` | address | undefined |
|
||||
| newDefaultERC20Gateway `indexed` | address | undefined |
|
||||
| oldDefaultERC20Gateway `indexed` | address | The address of the old default ERC20 Gateway. |
|
||||
| newDefaultERC20Gateway `indexed` | address | The address of the new default ERC20 Gateway. |
|
||||
|
||||
### SetERC20Gateway
|
||||
|
||||
@@ -518,9 +501,9 @@ Emitted when the `gateway` for `token` is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| token `indexed` | address | undefined |
|
||||
| oldGateway `indexed` | address | undefined |
|
||||
| newGateway `indexed` | address | undefined |
|
||||
| token `indexed` | address | The address of token updated. |
|
||||
| oldGateway `indexed` | address | The corresponding address of the old gateway. |
|
||||
| newGateway `indexed` | address | The corresponding address of the new gateway. |
|
||||
|
||||
### SetETHGateway
|
||||
|
||||
@@ -536,8 +519,8 @@ Emitted when the address of ETH Gateway is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| oldETHGateway `indexed` | address | undefined |
|
||||
| newEthGateway `indexed` | address | undefined |
|
||||
| oldETHGateway `indexed` | address | The address of the old ETH Gateway. |
|
||||
| newEthGateway `indexed` | address | The address of the new ETH Gateway. |
|
||||
|
||||
### WithdrawERC20
|
||||
|
||||
@@ -553,12 +536,12 @@ Emitted when someone withdraw ERC20 token from L2 to L1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L2. |
|
||||
| to | address | The address of recipient in L1. |
|
||||
| amount | uint256 | The amount of token will be deposited from L2 to L1. |
|
||||
| data | bytes | The optional calldata passed to recipient in L1. |
|
||||
|
||||
### WithdrawETH
|
||||
|
||||
@@ -574,24 +557,10 @@ Emitted when someone withdraw ETH from L2 to L1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| from `indexed` | address | undefined |
|
||||
| to `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
|
||||
|
||||
|
||||
## Errors
|
||||
|
||||
### ErrorZeroAddress
|
||||
|
||||
```solidity
|
||||
error ErrorZeroAddress()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the given address is `address(0)`.*
|
||||
| from `indexed` | address | The address of sender in L2. |
|
||||
| to `indexed` | address | The address of recipient in L1. |
|
||||
| amount | uint256 | The amount of ETH will be deposited from L2 to L1. |
|
||||
| data | bytes | The optional calldata passed to recipient in L1. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -308,7 +308,7 @@ Emitted when a cross domain message is failed to relay.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| messageHash `indexed` | bytes32 | undefined |
|
||||
| messageHash `indexed` | bytes32 | The hash of the message. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -318,7 +318,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -351,7 +351,7 @@ event Paused(address account)
|
||||
|
||||
|
||||
|
||||
|
||||
*Emitted when the pause is triggered by `account`.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -373,7 +373,7 @@ Emitted when a cross domain message is relayed successfully.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| messageHash `indexed` | bytes32 | undefined |
|
||||
| messageHash `indexed` | bytes32 | The hash of the message. |
|
||||
|
||||
### SentMessage
|
||||
|
||||
@@ -389,12 +389,12 @@ Emitted when a cross domain message is sent.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| sender `indexed` | address | undefined |
|
||||
| target `indexed` | address | undefined |
|
||||
| value | uint256 | undefined |
|
||||
| messageNonce | uint256 | undefined |
|
||||
| gasLimit | uint256 | undefined |
|
||||
| message | bytes | undefined |
|
||||
| sender `indexed` | address | The address of the sender who initiates the message. |
|
||||
| target `indexed` | address | The address of target contract to call. |
|
||||
| value | uint256 | The amount of value passed to the target contract. |
|
||||
| messageNonce | uint256 | The nonce of the message. |
|
||||
| gasLimit | uint256 | The optional gas limit passed to L1 or L2. |
|
||||
| message | bytes | The calldata passed to the target contract. |
|
||||
|
||||
### Unpaused
|
||||
|
||||
@@ -404,7 +404,7 @@ event Unpaused(address account)
|
||||
|
||||
|
||||
|
||||
|
||||
*Emitted when the pause is lifted by `account`.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -426,8 +426,8 @@ Emitted when owner updates fee vault contract.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _oldFeeVault | address | undefined |
|
||||
| _newFeeVault | address | undefined |
|
||||
| _oldFeeVault | address | The address of old fee vault contract. |
|
||||
| _newFeeVault | address | The address of new fee vault contract. |
|
||||
|
||||
### UpdateMaxFailedExecutionTimes
|
||||
|
||||
@@ -443,8 +443,8 @@ Emitted when the maximum number of times each message can fail in L2 is updated.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| oldMaxFailedExecutionTimes | uint256 | undefined |
|
||||
| newMaxFailedExecutionTimes | uint256 | undefined |
|
||||
| oldMaxFailedExecutionTimes | uint256 | The old maximum number of times each message can fail in L2. |
|
||||
| newMaxFailedExecutionTimes | uint256 | The new maximum number of times each message can fail in L2. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
The `L2StandardERC20Gateway` is used to withdraw standard ERC20 tokens on layer 2 and finalize deposit the tokens from layer 1.
|
||||
|
||||
*The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding token will be minted and transfered to the recipient. Any ERC20 that requires non-standard functionality should use a separate gateway.*
|
||||
*The withdrawn ERC20 tokens will be burned directly. On finalizing deposit, the corresponding token will be minted and transferred to the recipient. Any ERC20 that requires non-standard functionality should use a separate gateway.*
|
||||
|
||||
## Methods
|
||||
|
||||
@@ -95,7 +95,7 @@ Return the corresponding l2 token address given l1 token address.
|
||||
### initialize
|
||||
|
||||
```solidity
|
||||
function initialize(address _counterpart, address _router, address _messenger, address _tokenFactory) external nonpayable
|
||||
function initialize(address _counterpart, address _router, address _messenger, address) external nonpayable
|
||||
```
|
||||
|
||||
Initialize the storage of L2StandardERC20Gateway.
|
||||
@@ -106,10 +106,10 @@ Initialize the storage of L2StandardERC20Gateway.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _counterpart | address | The address of L1ETHGateway in L1. |
|
||||
| _router | address | The address of L2GatewayRouter. |
|
||||
| _messenger | address | The address of L2ScrollMessenger. |
|
||||
| _tokenFactory | address | The address of ScrollStandardERC20Factory. |
|
||||
| _counterpart | address | The address of `L1StandardERC20Gateway` contract in L1. |
|
||||
| _router | address | The address of `L2GatewayRouter` contract in L2. |
|
||||
| _messenger | address | The address of `L2ScrollMessenger` contract in L2. |
|
||||
| _3 | address | undefined |
|
||||
|
||||
### messenger
|
||||
|
||||
@@ -281,12 +281,12 @@ Emitted when ERC20 token is deposited from L1 to L2 and transfer to recipient.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L1. |
|
||||
| to | address | The address of recipient in L2. |
|
||||
| amount | uint256 | The amount of token withdrawn from L1 to L2. |
|
||||
| data | bytes | The optional calldata passed to recipient in L2. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -296,7 +296,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -335,12 +335,12 @@ Emitted when someone withdraw ERC20 token from L2 to L1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L2. |
|
||||
| to | address | The address of recipient in L1. |
|
||||
| amount | uint256 | The amount of token will be deposited from L2 to L1. |
|
||||
| data | bytes | The optional calldata passed to recipient in L1. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
The `L2WETHGateway` contract is used to withdraw `WETH` token on layer 2 and finalize deposit `WETH` from layer 1.
|
||||
|
||||
*The WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L2ScrollMessenger` contract. On finalizing deposit, the Ether will be transfered from `L2ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
|
||||
*The WETH tokens are not held in the gateway. It will first be unwrapped as Ether and then the Ether will be sent to the `L2ScrollMessenger` contract. On finalizing deposit, the Ether will be transferred from `L2ScrollMessenger`, then wrapped as WETH and finally transfer to recipient.*
|
||||
|
||||
## Methods
|
||||
|
||||
@@ -297,12 +297,12 @@ Emitted when ERC20 token is deposited from L1 to L2 and transfer to recipient.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L1. |
|
||||
| to | address | The address of recipient in L2. |
|
||||
| amount | uint256 | The amount of token withdrawn from L1 to L2. |
|
||||
| data | bytes | The optional calldata passed to recipient in L2. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -312,7 +312,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -351,12 +351,12 @@ Emitted when someone withdraw ERC20 token from L2 to L1.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| l1Token `indexed` | address | undefined |
|
||||
| l2Token `indexed` | address | undefined |
|
||||
| from `indexed` | address | undefined |
|
||||
| to | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
| data | bytes | undefined |
|
||||
| l1Token `indexed` | address | The address of the token in L1. |
|
||||
| l2Token `indexed` | address | The address of the token in L2. |
|
||||
| from `indexed` | address | The address of sender in L2. |
|
||||
| to | address | The address of recipient in L1. |
|
||||
| amount | uint256 | The amount of token will be deposited from L2 to L1. |
|
||||
| data | bytes | The optional calldata passed to recipient in L1. |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -91,7 +91,7 @@ function finalizeBatchWithProof(bytes _batchHeader, bytes32 _prevStateRoot, byte
|
||||
|
||||
Finalize a committed batch on layer 1.
|
||||
|
||||
|
||||
*We keep this function to upgrade to 4844 more smoothly.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -103,6 +103,27 @@ Finalize a committed batch on layer 1.
|
||||
| _withdrawRoot | bytes32 | undefined |
|
||||
| _aggrProof | bytes | undefined |
|
||||
|
||||
### finalizeBatchWithProof4844
|
||||
|
||||
```solidity
|
||||
function finalizeBatchWithProof4844(bytes _batchHeader, bytes32 _prevStateRoot, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes _blobDataProof, bytes _aggrProof) external nonpayable
|
||||
```
|
||||
|
||||
Finalize a committed batch (with blob) on layer 1.
|
||||
|
||||
*Memory layout of `_blobDataProof`: ```text | z | y | kzg_commitment | kzg_proof | |---------|---------|----------------|-----------| | bytes32 | bytes32 | bytes48 | bytes48 | ```*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _batchHeader | bytes | undefined |
|
||||
| _prevStateRoot | bytes32 | undefined |
|
||||
| _postStateRoot | bytes32 | undefined |
|
||||
| _withdrawRoot | bytes32 | undefined |
|
||||
| _blobDataProof | bytes | undefined |
|
||||
| _aggrProof | bytes | undefined |
|
||||
|
||||
### finalizedStateRoots
|
||||
|
||||
```solidity
|
||||
@@ -493,8 +514,8 @@ Emitted when a new batch is committed.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| batchIndex `indexed` | uint256 | undefined |
|
||||
| batchHash `indexed` | bytes32 | undefined |
|
||||
| batchIndex `indexed` | uint256 | The index of the batch. |
|
||||
| batchHash `indexed` | bytes32 | The hash of the batch. |
|
||||
|
||||
### FinalizeBatch
|
||||
|
||||
@@ -510,10 +531,10 @@ Emitted when a batch is finalized.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| batchIndex `indexed` | uint256 | undefined |
|
||||
| batchHash `indexed` | bytes32 | undefined |
|
||||
| stateRoot | bytes32 | undefined |
|
||||
| withdrawRoot | bytes32 | undefined |
|
||||
| batchIndex `indexed` | uint256 | The index of the batch. |
|
||||
| batchHash `indexed` | bytes32 | The hash of the batch |
|
||||
| stateRoot | bytes32 | The state root on layer 2 after this batch. |
|
||||
| withdrawRoot | bytes32 | The merkle root on layer2 after this batch. |
|
||||
|
||||
### Initialized
|
||||
|
||||
@@ -523,7 +544,7 @@ event Initialized(uint8 version)
|
||||
|
||||
|
||||
|
||||
|
||||
*Triggered when the contract has been initialized or reinitialized.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -556,7 +577,7 @@ event Paused(address account)
|
||||
|
||||
|
||||
|
||||
|
||||
*Emitted when the pause is triggered by `account`.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -578,8 +599,8 @@ revert a pending batch.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| batchIndex `indexed` | uint256 | undefined |
|
||||
| batchHash `indexed` | bytes32 | undefined |
|
||||
| batchIndex `indexed` | uint256 | The index of the batch. |
|
||||
| batchHash `indexed` | bytes32 | The hash of the batch |
|
||||
|
||||
### Unpaused
|
||||
|
||||
@@ -589,7 +610,7 @@ event Unpaused(address account)
|
||||
|
||||
|
||||
|
||||
|
||||
*Emitted when the pause is lifted by `account`.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -652,6 +673,347 @@ Emitted when owner updates the status of sequencer.
|
||||
|
||||
## Errors
|
||||
|
||||
### ErrorAccountIsNotEOA
|
||||
|
||||
```solidity
|
||||
error ErrorAccountIsNotEOA()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the given account is not EOA account.*
|
||||
|
||||
|
||||
### ErrorBatchHeaderLengthTooSmall
|
||||
|
||||
```solidity
|
||||
error ErrorBatchHeaderLengthTooSmall()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the length of batch header is smaller than 89*
|
||||
|
||||
|
||||
### ErrorBatchIsAlreadyCommitted
|
||||
|
||||
```solidity
|
||||
error ErrorBatchIsAlreadyCommitted()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when committing a committed batch.*
|
||||
|
||||
|
||||
### ErrorBatchIsAlreadyVerified
|
||||
|
||||
```solidity
|
||||
error ErrorBatchIsAlreadyVerified()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when finalizing a verified batch.*
|
||||
|
||||
|
||||
### ErrorBatchIsEmpty
|
||||
|
||||
```solidity
|
||||
error ErrorBatchIsEmpty()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when committing empty batch (batch without chunks)*
|
||||
|
||||
|
||||
### ErrorCallPointEvaluationPrecompileFailed
|
||||
|
||||
```solidity
|
||||
error ErrorCallPointEvaluationPrecompileFailed()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when call precompile failed.*
|
||||
|
||||
|
||||
### ErrorCallerIsNotProver
|
||||
|
||||
```solidity
|
||||
error ErrorCallerIsNotProver()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the caller is not prover.*
|
||||
|
||||
|
||||
### ErrorCallerIsNotSequencer
|
||||
|
||||
```solidity
|
||||
error ErrorCallerIsNotSequencer()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the caller is not sequencer.*
|
||||
|
||||
|
||||
### ErrorFoundMultipleBlob
|
||||
|
||||
```solidity
|
||||
error ErrorFoundMultipleBlob()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the transaction has multiple blobs.*
|
||||
|
||||
|
||||
### ErrorGenesisBatchHasNonZeroField
|
||||
|
||||
```solidity
|
||||
error ErrorGenesisBatchHasNonZeroField()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when some fields are not zero in genesis batch.*
|
||||
|
||||
|
||||
### ErrorGenesisBatchImported
|
||||
|
||||
```solidity
|
||||
error ErrorGenesisBatchImported()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when importing genesis batch twice.*
|
||||
|
||||
|
||||
### ErrorGenesisDataHashIsZero
|
||||
|
||||
```solidity
|
||||
error ErrorGenesisDataHashIsZero()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when data hash in genesis batch is zero.*
|
||||
|
||||
|
||||
### ErrorGenesisParentBatchHashIsNonZero
|
||||
|
||||
```solidity
|
||||
error ErrorGenesisParentBatchHashIsNonZero()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the parent batch hash in genesis batch is zero.*
|
||||
|
||||
|
||||
### ErrorIncompleteL2TransactionData
|
||||
|
||||
```solidity
|
||||
error ErrorIncompleteL2TransactionData()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the l2 transaction is incomplete.*
|
||||
|
||||
|
||||
### ErrorIncorrectBatchHash
|
||||
|
||||
```solidity
|
||||
error ErrorIncorrectBatchHash()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the batch hash is incorrect.*
|
||||
|
||||
|
||||
### ErrorIncorrectBatchIndex
|
||||
|
||||
```solidity
|
||||
error ErrorIncorrectBatchIndex()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the batch index is incorrect.*
|
||||
|
||||
|
||||
### ErrorIncorrectBitmapLength
|
||||
|
||||
```solidity
|
||||
error ErrorIncorrectBitmapLength()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the bitmap length is incorrect.*
|
||||
|
||||
|
||||
### ErrorIncorrectChunkLength
|
||||
|
||||
```solidity
|
||||
error ErrorIncorrectChunkLength()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the length of chunk is incorrect.*
|
||||
|
||||
|
||||
### ErrorIncorrectPreviousStateRoot
|
||||
|
||||
```solidity
|
||||
error ErrorIncorrectPreviousStateRoot()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the previous state root doesn't match stored one.*
|
||||
|
||||
|
||||
### ErrorInvalidBatchHeaderVersion
|
||||
|
||||
```solidity
|
||||
error ErrorInvalidBatchHeaderVersion()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the batch header version is invalid.*
|
||||
|
||||
|
||||
### ErrorLastL1MessageSkipped
|
||||
|
||||
```solidity
|
||||
error ErrorLastL1MessageSkipped()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the last message is skipped.*
|
||||
|
||||
|
||||
### ErrorNoBlobFound
|
||||
|
||||
```solidity
|
||||
error ErrorNoBlobFound()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when no blob found in the transaction.*
|
||||
|
||||
|
||||
### ErrorNoBlockInChunk
|
||||
|
||||
```solidity
|
||||
error ErrorNoBlockInChunk()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when no blocks in chunk.*
|
||||
|
||||
|
||||
### ErrorNumTxsLessThanNumL1Msgs
|
||||
|
||||
```solidity
|
||||
error ErrorNumTxsLessThanNumL1Msgs()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the number of transactions is less than number of L1 message in one block.*
|
||||
|
||||
|
||||
### ErrorPreviousStateRootIsZero
|
||||
|
||||
```solidity
|
||||
error ErrorPreviousStateRootIsZero()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the given previous state is zero.*
|
||||
|
||||
|
||||
### ErrorRevertFinalizedBatch
|
||||
|
||||
```solidity
|
||||
error ErrorRevertFinalizedBatch()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when reverting a finialized batch.*
|
||||
|
||||
|
||||
### ErrorRevertNotStartFromEnd
|
||||
|
||||
```solidity
|
||||
error ErrorRevertNotStartFromEnd()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the reverted batches are not in the ending of commited batch chain.*
|
||||
|
||||
|
||||
### ErrorRevertZeroBatches
|
||||
|
||||
```solidity
|
||||
error ErrorRevertZeroBatches()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the number of batches to revert is zero.*
|
||||
|
||||
|
||||
### ErrorStateRootIsZero
|
||||
|
||||
```solidity
|
||||
error ErrorStateRootIsZero()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the given state root is zero.*
|
||||
|
||||
|
||||
### ErrorTooManyTxsInOneChunk
|
||||
|
||||
```solidity
|
||||
error ErrorTooManyTxsInOneChunk()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when a chunk contains too many transactions.*
|
||||
|
||||
|
||||
### ErrorUnexpectedPointEvaluationPrecompileOutput
|
||||
|
||||
```solidity
|
||||
error ErrorUnexpectedPointEvaluationPrecompileOutput()
|
||||
```
|
||||
|
||||
|
||||
|
||||
*Thrown when the precompile output is incorrect.*
|
||||
|
||||
|
||||
### ErrorZeroAddress
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -135,8 +135,8 @@ Emitted when a l2 token is deployed.
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _l1Token `indexed` | address | undefined |
|
||||
| _l2Token `indexed` | address | undefined |
|
||||
| _l1Token `indexed` | address | The address of the l1 token. |
|
||||
| _l2Token `indexed` | address | The address of the l2 token. |
|
||||
|
||||
### OwnershipTransferred
|
||||
|
||||
|
||||
@@ -8,8 +8,8 @@ remappings = [] # a list of remapp
|
||||
libraries = [] # a list of deployed libraries to link against
|
||||
cache = true # whether to cache builds or not
|
||||
force = true # whether to ignore the cache (clean build)
|
||||
evm_version = 'london' # the evm version (by hardfork name)
|
||||
solc_version = '0.8.16' # override for the solc version (setting this ignores `auto_detect_solc`)
|
||||
# evm_version = 'london' # the evm version (by hardfork name)
|
||||
solc_version = '0.8.24' # override for the solc version (setting this ignores `auto_detect_solc`)
|
||||
optimizer = true # enable or disable the solc optimizer
|
||||
optimizer_runs = 200 # the number of optimizer runs
|
||||
verbosity = 2 # the verbosity of tests
|
||||
@@ -20,7 +20,6 @@ sender = '0x00a329c0648769a73afac7f9381e08fb43dbea72' # the address of `
|
||||
tx_origin = '0x00a329c0648769a73afac7f9381e08fb43dbea72' # the address of `tx.origin` in tests
|
||||
initial_balance = '0xffffffffffffffffffffffff' # the initial balance of the test contract
|
||||
block_number = 0 # the block number we are at in tests
|
||||
chain_id = 99 # the chain id we are on in tests
|
||||
gas_limit = 9223372036854775807 # the gas limit in tests
|
||||
gas_price = 0 # the gas price (in wei) in tests
|
||||
block_base_fee_per_gas = 0 # the base fee (in wei) in tests
|
||||
|
||||
@@ -2,8 +2,9 @@ import * as dotenv from "dotenv";
|
||||
|
||||
import { HardhatUserConfig, subtask } from "hardhat/config";
|
||||
import * as toml from "toml";
|
||||
import "@nomiclabs/hardhat-etherscan";
|
||||
import "@nomiclabs/hardhat-waffle";
|
||||
import "@nomicfoundation/hardhat-verify";
|
||||
import "@nomicfoundation/hardhat-ethers";
|
||||
import "@nomicfoundation/hardhat-chai-matchers";
|
||||
import "@typechain/hardhat";
|
||||
import "@primitivefi/hardhat-dodoc";
|
||||
import "hardhat-gas-reporter";
|
||||
@@ -13,16 +14,10 @@ import { TASK_COMPILE_SOLIDITY_GET_SOURCE_PATHS } from "hardhat/builtin-tasks/ta
|
||||
|
||||
dotenv.config();
|
||||
|
||||
// default values here to avoid failures when running hardhat
|
||||
const RINKEBY_RPC = process.env.RINKEBY_RPC || "1".repeat(32);
|
||||
const SCROLL_L1_RPC = process.env.SCROLL_L1_RPC || "1".repeat(32);
|
||||
const SCROLL_L2_RPC = process.env.SCROLL_L2_RPC || "1".repeat(32);
|
||||
|
||||
const RINKEBY_PRIVATE_KEY = process.env.RINKEBY_PRIVATE_KEY || "1".repeat(64);
|
||||
const L1_DEPLOYER_PRIVATE_KEY = process.env.L1_DEPLOYER_PRIVATE_KEY || "1".repeat(64);
|
||||
const L2_DEPLOYER_PRIVATE_KEY = process.env.L2_DEPLOYER_PRIVATE_KEY || "1".repeat(64);
|
||||
|
||||
const SOLC_DEFAULT = "0.8.16";
|
||||
const SOLC_DEFAULT = "0.8.24";
|
||||
|
||||
// try use forge config
|
||||
let foundry: any;
|
||||
@@ -45,29 +40,30 @@ subtask(TASK_COMPILE_SOLIDITY_GET_SOURCE_PATHS).setAction(async (_, __, runSuper
|
||||
|
||||
const config: HardhatUserConfig = {
|
||||
solidity: {
|
||||
version: foundry.default?.solc || SOLC_DEFAULT,
|
||||
version: foundry.default?.solc_version || SOLC_DEFAULT,
|
||||
settings: {
|
||||
optimizer: {
|
||||
enabled: foundry.default?.optimizer || true,
|
||||
runs: foundry.default?.optimizer_runs || 200,
|
||||
},
|
||||
evmVersion: "cancun",
|
||||
},
|
||||
},
|
||||
networks: {
|
||||
rinkeby: {
|
||||
url: RINKEBY_RPC,
|
||||
accounts: [RINKEBY_PRIVATE_KEY],
|
||||
},
|
||||
l1geth: {
|
||||
url: SCROLL_L1_RPC,
|
||||
gasPrice: 20000000000,
|
||||
gasMultiplier: 1.1,
|
||||
ethereum: {
|
||||
url: "https://1rpc.io/eth",
|
||||
accounts: [L1_DEPLOYER_PRIVATE_KEY],
|
||||
},
|
||||
l2geth: {
|
||||
url: SCROLL_L2_RPC,
|
||||
gasPrice: 20000000000,
|
||||
gasMultiplier: 1.1,
|
||||
sepolia: {
|
||||
url: "https://1rpc.io/sepolia",
|
||||
accounts: [L1_DEPLOYER_PRIVATE_KEY],
|
||||
},
|
||||
scroll: {
|
||||
url: "https://rpc.scroll.io",
|
||||
accounts: [L2_DEPLOYER_PRIVATE_KEY],
|
||||
},
|
||||
scroll_sepolia: {
|
||||
url: "https://sepolia-rpc.scroll.io",
|
||||
accounts: [L2_DEPLOYER_PRIVATE_KEY],
|
||||
},
|
||||
},
|
||||
@@ -76,13 +72,40 @@ const config: HardhatUserConfig = {
|
||||
sources: "./src",
|
||||
tests: "./integration-test",
|
||||
},
|
||||
typechain: {
|
||||
outDir: "./typechain",
|
||||
target: "ethers-v6",
|
||||
},
|
||||
gasReporter: {
|
||||
enabled: process.env.REPORT_GAS !== undefined,
|
||||
excludeContracts: ["src/test"],
|
||||
currency: "USD",
|
||||
},
|
||||
etherscan: {
|
||||
apiKey: process.env.ETHERSCAN_API_KEY,
|
||||
apiKey: {
|
||||
ethereum: process.env.ETHERSCAN_API_KEY || "",
|
||||
sepolia: process.env.ETHERSCAN_API_KEY || "",
|
||||
scroll: process.env.SCROLLSCAN_API_KEY || "",
|
||||
scroll_sepolia: process.env.SCROLLSCAN_API_KEY || "",
|
||||
},
|
||||
customChains: [
|
||||
{
|
||||
network: "scroll",
|
||||
chainId: 534352,
|
||||
urls: {
|
||||
apiURL: "https://api.scrollscan.com/api",
|
||||
browserURL: "https://www.scrollscan.com/",
|
||||
},
|
||||
},
|
||||
{
|
||||
network: "scroll_sepolia",
|
||||
chainId: 534351,
|
||||
urls: {
|
||||
apiURL: "https://api-sepolia.scrollscan.com/api",
|
||||
browserURL: "https://sepolia.scrollscan.com/",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
mocha: {
|
||||
timeout: 10000000,
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { HardhatEthersSigner, SignerWithAddress } from "@nomicfoundation/hardhat-ethers/signers";
|
||||
import { expect } from "chai";
|
||||
import { BigNumberish, BytesLike, constants } from "ethers";
|
||||
import { BigNumberish, BytesLike, MaxUint256, ZeroAddress, getBytes } from "ethers";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import { EnforcedTxGateway, L1MessageQueue, L2GasPriceOracle, MockCaller } from "../typechain";
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import { arrayify } from "ethers/lib/utils";
|
||||
|
||||
describe("EnforcedTxGateway.spec", async () => {
|
||||
let deployer: SignerWithAddress;
|
||||
let feeVault: SignerWithAddress;
|
||||
let signer: SignerWithAddress;
|
||||
let deployer: HardhatEthersSigner;
|
||||
let feeVault: HardhatEthersSigner;
|
||||
let signer: HardhatEthersSigner;
|
||||
|
||||
let caller: MockCaller;
|
||||
let gateway: EnforcedTxGateway;
|
||||
@@ -21,10 +21,8 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
|
||||
const Factory = await ethers.getContractFactory(name, deployer);
|
||||
const impl = args.length > 0 ? await Factory.deploy(...args) : await Factory.deploy();
|
||||
await impl.deployed();
|
||||
const proxy = await TransparentUpgradeableProxy.deploy(impl.address, admin, "0x");
|
||||
await proxy.deployed();
|
||||
return proxy.address;
|
||||
const proxy = await TransparentUpgradeableProxy.deploy(impl.getAddress(), admin, "0x");
|
||||
return proxy.getAddress();
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
@@ -32,66 +30,61 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
|
||||
const ProxyAdmin = await ethers.getContractFactory("ProxyAdmin", deployer);
|
||||
const admin = await ProxyAdmin.deploy();
|
||||
await admin.deployed();
|
||||
|
||||
gateway = await ethers.getContractAt(
|
||||
"EnforcedTxGateway",
|
||||
await deployProxy("EnforcedTxGateway", admin.address, []),
|
||||
await deployProxy("EnforcedTxGateway", await admin.getAddress(), []),
|
||||
deployer
|
||||
);
|
||||
|
||||
queue = await ethers.getContractAt(
|
||||
"L1MessageQueue",
|
||||
await deployProxy("L1MessageQueue", admin.address, [deployer.address, deployer.address, gateway.address]),
|
||||
await deployProxy("L1MessageQueue", await admin.getAddress(), [
|
||||
deployer.address,
|
||||
deployer.address,
|
||||
await gateway.getAddress(),
|
||||
]),
|
||||
deployer
|
||||
);
|
||||
|
||||
oracle = await ethers.getContractAt(
|
||||
"L2GasPriceOracle",
|
||||
await deployProxy("L2GasPriceOracle", admin.address, []),
|
||||
await deployProxy("L2GasPriceOracle", await admin.getAddress(), []),
|
||||
deployer
|
||||
);
|
||||
|
||||
const MockCaller = await ethers.getContractFactory("MockCaller", deployer);
|
||||
caller = await MockCaller.deploy();
|
||||
await caller.deployed();
|
||||
|
||||
await queue.initialize(
|
||||
constants.AddressZero,
|
||||
constants.AddressZero,
|
||||
constants.AddressZero,
|
||||
oracle.address,
|
||||
10000000
|
||||
);
|
||||
await gateway.initialize(queue.address, feeVault.address);
|
||||
await queue.initialize(ZeroAddress, ZeroAddress, ZeroAddress, oracle.getAddress(), 10000000);
|
||||
await gateway.initialize(queue.getAddress(), feeVault.address);
|
||||
await oracle.initialize(21000, 51000, 8, 16);
|
||||
|
||||
const Whitelist = await ethers.getContractFactory("Whitelist", deployer);
|
||||
const whitelist = await Whitelist.deploy(deployer.address);
|
||||
await whitelist.deployed();
|
||||
|
||||
await whitelist.updateWhitelistStatus([deployer.address], true);
|
||||
await oracle.updateWhitelist(whitelist.address);
|
||||
await oracle.updateWhitelist(whitelist.getAddress());
|
||||
await oracle.setL2BaseFee(1);
|
||||
});
|
||||
|
||||
context("auth", async () => {
|
||||
it("should initialize correctly", async () => {
|
||||
expect(await gateway.owner()).to.eq(deployer.address);
|
||||
expect(await gateway.messageQueue()).to.eq(queue.address);
|
||||
expect(await gateway.messageQueue()).to.eq(await queue.getAddress());
|
||||
expect(await gateway.feeVault()).to.eq(feeVault.address);
|
||||
expect(await gateway.paused()).to.eq(false);
|
||||
});
|
||||
|
||||
it("should revert, when initialize again", async () => {
|
||||
await expect(gateway.initialize(constants.AddressZero, constants.AddressZero)).to.revertedWith(
|
||||
await expect(gateway.initialize(ZeroAddress, ZeroAddress)).to.revertedWith(
|
||||
"Initializable: contract is already initialized"
|
||||
);
|
||||
});
|
||||
|
||||
context("#updateFeeVault", async () => {
|
||||
it("should revert, when non-owner call", async () => {
|
||||
await expect(gateway.connect(signer).updateFeeVault(constants.AddressZero)).to.revertedWith(
|
||||
await expect(gateway.connect(signer).updateFeeVault(ZeroAddress)).to.revertedWith(
|
||||
"Ownable: caller is not the owner"
|
||||
);
|
||||
});
|
||||
@@ -129,13 +122,13 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
});
|
||||
|
||||
it("should revert, when call is not EOA", async () => {
|
||||
const tx = await gateway.populateTransaction["sendTransaction(address,uint256,uint256,bytes)"](
|
||||
const calldata = gateway.interface.encodeFunctionData("sendTransaction(address,uint256,uint256,bytes)", [
|
||||
signer.address,
|
||||
0,
|
||||
0,
|
||||
"0x"
|
||||
);
|
||||
await expect(caller.callTarget(gateway.address, tx.data!)).to.revertedWith(
|
||||
"0x",
|
||||
]);
|
||||
await expect(caller.callTarget(gateway.getAddress(), calldata)).to.revertedWith(
|
||||
"Only EOA senders are allowed to send enforced transaction"
|
||||
);
|
||||
});
|
||||
@@ -145,12 +138,12 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
await expect(
|
||||
gateway
|
||||
.connect(signer)
|
||||
["sendTransaction(address,uint256,uint256,bytes)"](signer.address, 0, 1000000, "0x", { value: fee.sub(1) })
|
||||
["sendTransaction(address,uint256,uint256,bytes)"](signer.address, 0, 1000000, "0x", { value: fee - 1n })
|
||||
).to.revertedWith("Insufficient value for fee");
|
||||
});
|
||||
|
||||
it("should revert, when failed to deduct the fee", async () => {
|
||||
await gateway.updateFeeVault(gateway.address);
|
||||
await gateway.updateFeeVault(gateway.getAddress());
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1000000);
|
||||
await expect(
|
||||
gateway
|
||||
@@ -170,7 +163,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
.to.emit(queue, "QueueTransaction")
|
||||
.withArgs(signer.address, deployer.address, 0, 0, 1000000, "0x");
|
||||
const feeVaultBalanceAfter = await ethers.provider.getBalance(feeVault.address);
|
||||
expect(feeVaultBalanceAfter.sub(feeVaultBalanceBefore)).to.eq(fee);
|
||||
expect(feeVaultBalanceAfter - feeVaultBalanceBefore).to.eq(fee);
|
||||
});
|
||||
|
||||
it("should succeed, with refund", async () => {
|
||||
@@ -179,17 +172,15 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
const signerBalanceBefore = await ethers.provider.getBalance(signer.address);
|
||||
const tx = await gateway
|
||||
.connect(signer)
|
||||
["sendTransaction(address,uint256,uint256,bytes)"](deployer.address, 0, 1000000, "0x", { value: fee.add(100) });
|
||||
["sendTransaction(address,uint256,uint256,bytes)"](deployer.address, 0, 1000000, "0x", { value: fee + 100n });
|
||||
await expect(tx)
|
||||
.to.emit(queue, "QueueTransaction")
|
||||
.withArgs(signer.address, deployer.address, 0, 0, 1000000, "0x");
|
||||
const receipt = await tx.wait();
|
||||
const feeVaultBalanceAfter = await ethers.provider.getBalance(feeVault.address);
|
||||
const signerBalanceAfter = await ethers.provider.getBalance(signer.address);
|
||||
expect(feeVaultBalanceAfter.sub(feeVaultBalanceBefore)).to.eq(fee);
|
||||
expect(signerBalanceBefore.sub(signerBalanceAfter)).to.eq(
|
||||
receipt.gasUsed.mul(receipt.effectiveGasPrice).add(fee)
|
||||
);
|
||||
expect(feeVaultBalanceAfter - feeVaultBalanceBefore).to.eq(fee);
|
||||
expect(signerBalanceBefore - signerBalanceAfter).to.eq(receipt!.gasUsed * receipt!.gasPrice + fee);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -203,19 +194,19 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
) => {
|
||||
const enforcedTx = {
|
||||
sender: signer.address,
|
||||
target: target,
|
||||
value: value,
|
||||
gasLimit: gasLimit,
|
||||
data: arrayify(data),
|
||||
target,
|
||||
value,
|
||||
gasLimit,
|
||||
data: getBytes(data),
|
||||
nonce: await gateway.nonces(signer.address),
|
||||
deadline: constants.MaxUint256,
|
||||
deadline: MaxUint256,
|
||||
};
|
||||
|
||||
const domain = {
|
||||
name: "EnforcedTxGateway",
|
||||
version: "1",
|
||||
chainId: (await ethers.provider.getNetwork()).chainId,
|
||||
verifyingContract: gateway.address,
|
||||
verifyingContract: await gateway.getAddress(),
|
||||
};
|
||||
|
||||
const types = {
|
||||
@@ -251,7 +242,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
],
|
||||
};
|
||||
|
||||
const signature = await signer._signTypedData(domain, types, enforcedTx);
|
||||
const signature = await signer.signTypedData(domain, types, enforcedTx);
|
||||
return signature;
|
||||
};
|
||||
|
||||
@@ -266,15 +257,15 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
0,
|
||||
"0x",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
"0x",
|
||||
constants.AddressZero
|
||||
ZeroAddress
|
||||
)
|
||||
).to.revertedWith("Pausable: paused");
|
||||
});
|
||||
|
||||
it("should revert, when signature expired", async () => {
|
||||
const timestamp = (await ethers.provider.getBlock("latest")).timestamp;
|
||||
const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp;
|
||||
await expect(
|
||||
gateway
|
||||
.connect(deployer)
|
||||
@@ -286,7 +277,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
"0x",
|
||||
timestamp - 1,
|
||||
"0x",
|
||||
constants.AddressZero
|
||||
ZeroAddress
|
||||
)
|
||||
).to.revertedWith("signature expired");
|
||||
});
|
||||
@@ -302,9 +293,9 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
0,
|
||||
"0x",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
signature,
|
||||
constants.AddressZero
|
||||
ZeroAddress
|
||||
)
|
||||
).to.revertedWith("Incorrect signature");
|
||||
});
|
||||
@@ -321,16 +312,16 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
1000000,
|
||||
"0x",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
signature,
|
||||
signer.address,
|
||||
{ value: fee.sub(1) }
|
||||
{ value: fee - 1n }
|
||||
)
|
||||
).to.revertedWith("Insufficient value for fee");
|
||||
});
|
||||
|
||||
it("should revert, when failed to deduct the fee", async () => {
|
||||
await gateway.updateFeeVault(gateway.address);
|
||||
await gateway.updateFeeVault(gateway.getAddress());
|
||||
const signature = await getSignature(signer, signer.address, 0, 1000000, "0x");
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1000000);
|
||||
await expect(
|
||||
@@ -342,7 +333,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
1000000,
|
||||
"0x",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
signature,
|
||||
signer.address,
|
||||
{ value: fee }
|
||||
@@ -364,7 +355,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
1000000,
|
||||
"0x",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
signature,
|
||||
signer.address,
|
||||
{ value: fee }
|
||||
@@ -374,7 +365,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
.withArgs(signer.address, deployer.address, 0, 0, 1000000, "0x");
|
||||
expect(await gateway.nonces(signer.address)).to.eq(1);
|
||||
const feeVaultBalanceAfter = await ethers.provider.getBalance(feeVault.address);
|
||||
expect(feeVaultBalanceAfter.sub(feeVaultBalanceBefore)).to.eq(fee);
|
||||
expect(feeVaultBalanceAfter - feeVaultBalanceBefore).to.eq(fee);
|
||||
|
||||
// use the same nonce to sign should fail
|
||||
await expect(
|
||||
@@ -386,7 +377,7 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
1000000,
|
||||
"0x",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
signature,
|
||||
signer.address,
|
||||
{ value: fee }
|
||||
@@ -409,10 +400,10 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
1000000,
|
||||
"0x",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
signature,
|
||||
signer.address,
|
||||
{ value: fee.add(100) }
|
||||
{ value: fee + 100n }
|
||||
)
|
||||
)
|
||||
.to.emit(queue, "QueueTransaction")
|
||||
@@ -420,8 +411,8 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
expect(await gateway.nonces(signer.address)).to.eq(1);
|
||||
const feeVaultBalanceAfter = await ethers.provider.getBalance(feeVault.address);
|
||||
const signerBalanceAfter = await ethers.provider.getBalance(signer.address);
|
||||
expect(feeVaultBalanceAfter.sub(feeVaultBalanceBefore)).to.eq(fee);
|
||||
expect(signerBalanceAfter.sub(signerBalanceBefore)).to.eq(100);
|
||||
expect(feeVaultBalanceAfter - feeVaultBalanceBefore).to.eq(fee);
|
||||
expect(signerBalanceAfter - signerBalanceBefore).to.eq(100n);
|
||||
|
||||
// use the same nonce to sign should fail
|
||||
await expect(
|
||||
@@ -433,10 +424,10 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
1000000,
|
||||
"0x",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
signature,
|
||||
signer.address,
|
||||
{ value: fee.add(100) }
|
||||
{ value: fee + 100n }
|
||||
)
|
||||
).to.revertedWith("Incorrect signature");
|
||||
});
|
||||
@@ -453,10 +444,10 @@ describe("EnforcedTxGateway.spec", async () => {
|
||||
0,
|
||||
1000000,
|
||||
"0x1234",
|
||||
constants.MaxUint256,
|
||||
MaxUint256,
|
||||
signature,
|
||||
gateway.address,
|
||||
{ value: fee.add(100) }
|
||||
gateway.getAddress(),
|
||||
{ value: fee + 100n }
|
||||
)
|
||||
).to.revertedWith("Failed to refund the fee");
|
||||
});
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
/* eslint-disable node/no-missing-import */
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers";
|
||||
import { expect } from "chai";
|
||||
import { BigNumber, BigNumberish, ContractTransaction, constants } from "ethers";
|
||||
import { keccak256 } from "ethers/lib/utils";
|
||||
import { BigNumberish, ContractTransactionResponse, MaxUint256, keccak256, toQuantity } from "ethers";
|
||||
import { ethers, network } from "hardhat";
|
||||
|
||||
import {
|
||||
@@ -24,31 +23,27 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L2_MESSAGE_QUEUE = "0x5300000000000000000000000000000000000000";
|
||||
const SCROLL_CHAIN = "0xa13BAF47339d63B743e7Da8741db5456DAc1E556";
|
||||
|
||||
let deployer: SignerWithAddress;
|
||||
let deployer: HardhatEthersSigner;
|
||||
|
||||
let proxyAdmin: ProxyAdmin;
|
||||
|
||||
const mockERC20Balance = async (tokenAddress: string, balance: BigNumber, slot: BigNumberish) => {
|
||||
const mockERC20Balance = async (tokenAddress: string, balance: bigint, slot: BigNumberish) => {
|
||||
const storageSlot = keccak256(
|
||||
ethers.utils.defaultAbiCoder.encode(["address", "uint256"], [deployer.address, slot])
|
||||
ethers.AbiCoder.defaultAbiCoder().encode(["address", "uint256"], [deployer.address, slot])
|
||||
);
|
||||
await ethers.provider.send("hardhat_setStorageAt", [
|
||||
tokenAddress,
|
||||
storageSlot,
|
||||
ethers.utils.hexlify(ethers.utils.zeroPad(balance.toHexString(), 32)),
|
||||
]);
|
||||
await ethers.provider.send("hardhat_setStorageAt", [tokenAddress, storageSlot, toQuantity(balance)]);
|
||||
const token = await ethers.getContractAt("MockERC20", tokenAddress, deployer);
|
||||
expect(await token.balanceOf(deployer.address)).to.eq(balance);
|
||||
};
|
||||
|
||||
const mockETHBalance = async (balance: BigNumber) => {
|
||||
await network.provider.send("hardhat_setBalance", [deployer.address, balance.toHexString()]);
|
||||
expect(await deployer.getBalance()).to.eq(balance);
|
||||
const mockETHBalance = async (balance: bigint) => {
|
||||
await network.provider.send("hardhat_setBalance", [deployer.address, toQuantity(balance)]);
|
||||
expect(await ethers.provider.getBalance(deployer.address)).to.eq(balance);
|
||||
};
|
||||
|
||||
const showGasUsage = async (tx: ContractTransaction, desc: string) => {
|
||||
const showGasUsage = async (tx: ContractTransactionResponse, desc: string) => {
|
||||
const receipt = await tx.wait();
|
||||
console.log(`${desc}: GasUsed[${receipt.gasUsed}]`);
|
||||
console.log(`${desc}: GasUsed[${receipt!.gasUsed}]`);
|
||||
};
|
||||
|
||||
context("L1 upgrade", async () => {
|
||||
@@ -59,7 +54,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
|
||||
beforeEach(async () => {
|
||||
// fork network
|
||||
const provider = new ethers.providers.JsonRpcProvider("https://rpc.ankr.com/eth");
|
||||
const provider = new ethers.JsonRpcProvider("https://rpc.ankr.com/eth");
|
||||
if (!forkBlock) {
|
||||
forkBlock = (await provider.getBlockNumber()) - 10;
|
||||
}
|
||||
@@ -81,14 +76,14 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
|
||||
// mock eth balance
|
||||
deployer = await ethers.getSigner("0x1100000000000000000000000000000000000011");
|
||||
await mockETHBalance(ethers.utils.parseEther("1000"));
|
||||
await mockETHBalance(ethers.parseEther("1000"));
|
||||
|
||||
// mock owner of proxy admin
|
||||
proxyAdmin = await ethers.getContractAt("ProxyAdmin", "0xEB803eb3F501998126bf37bB823646Ed3D59d072", deployer);
|
||||
await ethers.provider.send("hardhat_setStorageAt", [
|
||||
proxyAdmin.address,
|
||||
await proxyAdmin.getAddress(),
|
||||
"0x0",
|
||||
ethers.utils.hexlify(ethers.utils.zeroPad(deployer.address, 32)),
|
||||
ethers.AbiCoder.defaultAbiCoder().encode(["address"], [deployer.address]),
|
||||
]);
|
||||
expect(await proxyAdmin.owner()).to.eq(deployer.address);
|
||||
|
||||
@@ -107,9 +102,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const ScrollChain = await ethers.getContractFactory("ScrollChain", deployer);
|
||||
await proxyAdmin.upgrade(
|
||||
L1_MESSENGER,
|
||||
(
|
||||
await L1ScrollMessenger.deploy(L2_MESSENGER, SCROLL_CHAIN, L1_MESSAGE_QUEUE)
|
||||
).address
|
||||
(await L1ScrollMessenger.deploy(L2_MESSENGER, SCROLL_CHAIN, L1_MESSAGE_QUEUE)).getAddress()
|
||||
);
|
||||
await proxyAdmin.upgrade(
|
||||
L1_MESSAGE_QUEUE,
|
||||
@@ -119,14 +112,12 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
SCROLL_CHAIN,
|
||||
"0x72CAcBcfDe2d1e19122F8A36a4d6676cd39d7A5d"
|
||||
)
|
||||
).address
|
||||
).getAddress()
|
||||
);
|
||||
await queue.initializeV2();
|
||||
await proxyAdmin.upgrade(
|
||||
SCROLL_CHAIN,
|
||||
(
|
||||
await ScrollChain.deploy(534352, L1_MESSAGE_QUEUE, "0xA2Ab526e5C5491F10FC05A55F064BF9F7CEf32a0")
|
||||
).address
|
||||
(await ScrollChain.deploy(534352, L1_MESSAGE_QUEUE, "0xA2Ab526e5C5491F10FC05A55F064BF9F7CEf32a0")).getAddress()
|
||||
);
|
||||
};
|
||||
|
||||
@@ -136,40 +127,40 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L1ETHGateway = await ethers.getContractFactory("L1ETHGateway", deployer);
|
||||
const impl = await L1ETHGateway.deploy(L2_GATEWAY, L1_ROUTER, L1_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L1ETHGateway", L1_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseEther("1");
|
||||
const amountIn = ethers.parseEther("1");
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1e6);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
await gateway["depositETH(uint256,uint256)"](amountIn, 1e6, { value: amountIn.add(fee) }),
|
||||
await gateway["depositETH(uint256,uint256)"](amountIn, 1e6, { value: amountIn + fee }),
|
||||
"L1ETHGateway.depositETH before upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["depositETH(uint256,uint256)"](amountIn, 1e6, { value: amountIn.add(fee) }),
|
||||
await router["depositETH(uint256,uint256)"](amountIn, 1e6, { value: amountIn + fee }),
|
||||
"L1GatewayRouter.depositETH before upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await messenger["sendMessage(address,uint256,bytes,uint256)"](deployer.address, amountIn, "0x", 1e6, {
|
||||
value: amountIn.add(fee),
|
||||
value: amountIn + fee,
|
||||
}),
|
||||
"L1ScrollMessenger.sendMessage before upgrade"
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL1(L1_GATEWAY, impl.address);
|
||||
await upgradeL1(L1_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
await gateway["depositETH(uint256,uint256)"](amountIn, 1e6, { value: amountIn.add(fee) }),
|
||||
await gateway["depositETH(uint256,uint256)"](amountIn, 1e6, { value: amountIn + fee }),
|
||||
"L1ETHGateway.depositETH after upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["depositETH(uint256,uint256)"](amountIn, 1e6, { value: amountIn.add(fee) }),
|
||||
await router["depositETH(uint256,uint256)"](amountIn, 1e6, { value: amountIn + fee }),
|
||||
"L1GatewayRouter.depositETH after upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await messenger["sendMessage(address,uint256,bytes,uint256)"](deployer.address, amountIn, "0x", 1e6, {
|
||||
value: amountIn.add(fee),
|
||||
value: amountIn + fee,
|
||||
}),
|
||||
"L1ScrollMessenger.sendMessage after upgrade"
|
||||
);
|
||||
@@ -183,12 +174,12 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L1WETHGateway = await ethers.getContractFactory("L1WETHGateway", deployer);
|
||||
const impl = await L1WETHGateway.deploy(L1_WETH, L2_WETH, L2_GATEWAY, L1_ROUTER, L1_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L1WETHGateway", L1_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseEther("1");
|
||||
const amountIn = ethers.parseEther("1");
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1e6);
|
||||
const token = await ethers.getContractAt("MockERC20", L1_WETH, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 3);
|
||||
await token.approve(L1_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L1_ROUTER, constants.MaxUint256);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 3);
|
||||
await token.approve(L1_GATEWAY, MaxUint256);
|
||||
await token.approve(L1_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -201,7 +192,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL1(L1_GATEWAY, impl.address);
|
||||
await upgradeL1(L1_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -227,12 +218,12 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
"0x66e5312EDeEAef6e80759A0F789e7914Fb401484"
|
||||
);
|
||||
const gateway = await ethers.getContractAt("L1StandardERC20Gateway", L1_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseUnits("1", 6);
|
||||
const amountIn = ethers.parseUnits("1", 6);
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1e6);
|
||||
const token = await ethers.getContractAt("MockERC20", L1_USDT, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 2);
|
||||
await token.approve(L1_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L1_ROUTER, constants.MaxUint256);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 2);
|
||||
await token.approve(L1_GATEWAY, MaxUint256);
|
||||
await token.approve(L1_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -245,7 +236,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL1(L1_GATEWAY, impl.address);
|
||||
await upgradeL1(L1_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -265,12 +256,12 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L1CustomERC20Gateway = await ethers.getContractFactory("L1CustomERC20Gateway", deployer);
|
||||
const impl = await L1CustomERC20Gateway.deploy(L2_GATEWAY, L1_ROUTER, L1_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L1CustomERC20Gateway", L1_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseUnits("1", 18);
|
||||
const amountIn = ethers.parseUnits("1", 18);
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1e6);
|
||||
const token = await ethers.getContractAt("MockERC20", L1_DAI, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 2);
|
||||
await token.approve(L1_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L1_ROUTER, constants.MaxUint256);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 2);
|
||||
await token.approve(L1_GATEWAY, MaxUint256);
|
||||
await token.approve(L1_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -283,7 +274,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL1(L1_GATEWAY, impl.address);
|
||||
await upgradeL1(L1_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -304,12 +295,12 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L1USDCGateway = await ethers.getContractFactory("L1USDCGateway", deployer);
|
||||
const impl = await L1USDCGateway.deploy(L1_USDC, L2_USDC, L2_GATEWAY, L1_ROUTER, L1_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L1USDCGateway", L1_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseUnits("1", 6);
|
||||
const amountIn = ethers.parseUnits("1", 6);
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1e6);
|
||||
const token = await ethers.getContractAt("MockERC20", L1_USDC, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 9);
|
||||
await token.approve(L1_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L1_ROUTER, constants.MaxUint256);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 9);
|
||||
await token.approve(L1_GATEWAY, MaxUint256);
|
||||
await token.approve(L1_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -322,7 +313,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL1(L1_GATEWAY, impl.address);
|
||||
await upgradeL1(L1_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -334,6 +325,46 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
"L1GatewayRouter.depositERC20 USDC after upgrade"
|
||||
);
|
||||
});
|
||||
|
||||
it.skip("should succeed on L1LidoGateway", async () => {
|
||||
const L1_WSTETH = "0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0";
|
||||
const L2_WSTETH = "0xf610A9dfB7C89644979b4A0f27063E9e7d7Cda32";
|
||||
const L1_GATEWAY = "0x6625C6332c9F91F2D27c304E729B86db87A3f504";
|
||||
const L2_GATEWAY = "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9";
|
||||
const L1LidoGateway = await ethers.getContractFactory("L1LidoGateway", deployer);
|
||||
const impl = await L1LidoGateway.deploy(L1_WSTETH, L2_WSTETH, L2_GATEWAY, L1_ROUTER, L1_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L1LidoGateway", L1_GATEWAY, deployer);
|
||||
const amountIn = ethers.parseUnits("1", 6);
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1e6);
|
||||
const token = await ethers.getContractAt("MockERC20", L1_WSTETH, deployer);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 0);
|
||||
await token.approve(L1_GATEWAY, MaxUint256);
|
||||
await token.approve(L1_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
await gateway["depositERC20(address,uint256,uint256)"](L1_WSTETH, amountIn, 1e6, { value: fee }),
|
||||
"L1LidoGateway.depositERC20 wstETH before upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["depositERC20(address,uint256,uint256)"](L1_WSTETH, amountIn, 1e6, { value: fee }),
|
||||
"L1GatewayRouter.depositERC20 wstETH before upgrade"
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL1(L1_GATEWAY, await impl.getAddress());
|
||||
await gateway.initializeV2(deployer.address, deployer.address, deployer.address, deployer.address);
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
await gateway["depositERC20(address,uint256,uint256)"](L1_WSTETH, amountIn, 1e6, { value: fee }),
|
||||
"L1LidoGateway.depositERC20 wstETH after upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["depositERC20(address,uint256,uint256)"](L1_WSTETH, amountIn, 1e6, { value: fee }),
|
||||
"L1GatewayRouter.depositERC20 wstETH after upgrade"
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
context("L2 upgrade", async () => {
|
||||
@@ -343,7 +374,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
|
||||
beforeEach(async () => {
|
||||
// fork network
|
||||
const provider = new ethers.providers.JsonRpcProvider("https://rpc.scroll.io");
|
||||
const provider = new ethers.JsonRpcProvider("https://rpc.scroll.io");
|
||||
if (!forkBlock) {
|
||||
forkBlock = (await provider.getBlockNumber()) - 31;
|
||||
}
|
||||
@@ -365,14 +396,14 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
|
||||
// mock eth balance
|
||||
deployer = await ethers.getSigner("0x1100000000000000000000000000000000000011");
|
||||
await mockETHBalance(ethers.utils.parseEther("1000"));
|
||||
await mockETHBalance(ethers.parseEther("1000"));
|
||||
|
||||
// mock owner of proxy admin
|
||||
proxyAdmin = await ethers.getContractAt("ProxyAdmin", "0xA76acF000C890b0DD7AEEf57627d9899F955d026", deployer);
|
||||
await ethers.provider.send("hardhat_setStorageAt", [
|
||||
proxyAdmin.address,
|
||||
await proxyAdmin.getAddress(),
|
||||
"0x0",
|
||||
ethers.utils.hexlify(ethers.utils.zeroPad(deployer.address, 32)),
|
||||
ethers.AbiCoder.defaultAbiCoder().encode(["address"], [deployer.address]),
|
||||
]);
|
||||
expect(await proxyAdmin.owner()).to.eq(deployer.address);
|
||||
|
||||
@@ -383,7 +414,10 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const upgradeL2 = async (proxy: string, impl: string) => {
|
||||
await proxyAdmin.upgrade(proxy, impl);
|
||||
const L2ScrollMessenger = await ethers.getContractFactory("L2ScrollMessenger", deployer);
|
||||
await proxyAdmin.upgrade(L2_MESSENGER, (await L2ScrollMessenger.deploy(L1_MESSENGER, L2_MESSAGE_QUEUE)).address);
|
||||
await proxyAdmin.upgrade(
|
||||
L2_MESSENGER,
|
||||
(await L2ScrollMessenger.deploy(L1_MESSENGER, L2_MESSAGE_QUEUE)).getAddress()
|
||||
);
|
||||
};
|
||||
|
||||
it.skip("should succeed on L2ETHGateway", async () => {
|
||||
@@ -392,7 +426,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L2ETHGateway = await ethers.getContractFactory("L2ETHGateway", deployer);
|
||||
const impl = await L2ETHGateway.deploy(L1_GATEWAY, L2_ROUTER, L2_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L2ETHGateway", L2_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseEther("1");
|
||||
const amountIn = ethers.parseEther("1");
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -411,7 +445,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL2(L2_GATEWAY, impl.address);
|
||||
await upgradeL2(L2_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -438,11 +472,11 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L2WETHGateway = await ethers.getContractFactory("L2WETHGateway", deployer);
|
||||
const impl = await L2WETHGateway.deploy(L2_WETH, L1_WETH, L1_GATEWAY, L2_ROUTER, L2_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L2WETHGateway", L2_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseEther("1");
|
||||
const amountIn = ethers.parseEther("1");
|
||||
const token = await ethers.getContractAt("MockERC20", L2_WETH, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 0);
|
||||
await token.approve(L2_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L2_ROUTER, constants.MaxUint256);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 0);
|
||||
await token.approve(L2_GATEWAY, MaxUint256);
|
||||
await token.approve(L2_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -455,7 +489,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL2(L2_GATEWAY, impl.address);
|
||||
await upgradeL2(L2_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -480,11 +514,11 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
"0x66e5312EDeEAef6e80759A0F789e7914Fb401484"
|
||||
);
|
||||
const gateway = await ethers.getContractAt("L2StandardERC20Gateway", L2_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseUnits("1", 6);
|
||||
const amountIn = ethers.parseUnits("1", 6);
|
||||
const token = await ethers.getContractAt("MockERC20", L2_USDT, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 51);
|
||||
await token.approve(L2_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L2_ROUTER, constants.MaxUint256);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 51);
|
||||
await token.approve(L2_GATEWAY, MaxUint256);
|
||||
await token.approve(L2_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -497,7 +531,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL2(L2_GATEWAY, impl.address);
|
||||
await upgradeL2(L2_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -517,11 +551,11 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L2CustomERC20Gateway = await ethers.getContractFactory("L2CustomERC20Gateway", deployer);
|
||||
const impl = await L2CustomERC20Gateway.deploy(L1_GATEWAY, L2_ROUTER, L2_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L2CustomERC20Gateway", L2_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseUnits("1", 18);
|
||||
const amountIn = ethers.parseUnits("1", 18);
|
||||
const token = await ethers.getContractAt("MockERC20", L2_DAI, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 51);
|
||||
await token.approve(L1_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L1_ROUTER, constants.MaxUint256);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 51);
|
||||
await token.approve(L1_GATEWAY, MaxUint256);
|
||||
await token.approve(L1_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -534,7 +568,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL2(L2_GATEWAY, impl.address);
|
||||
await upgradeL2(L2_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -555,11 +589,11 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
const L2USDCGateway = await ethers.getContractFactory("L2USDCGateway", deployer);
|
||||
const impl = await L2USDCGateway.deploy(L1_USDC, L2_USDC, L1_GATEWAY, L2_ROUTER, L2_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L2USDCGateway", L2_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseUnits("1", 6);
|
||||
const amountIn = ethers.parseUnits("1", 6);
|
||||
const token = await ethers.getContractAt("MockERC20", L2_USDC, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 9);
|
||||
await token.approve(L2_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L2_ROUTER, constants.MaxUint256);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 9);
|
||||
await token.approve(L2_GATEWAY, MaxUint256);
|
||||
await token.approve(L2_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
@@ -572,7 +606,7 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL2(L2_GATEWAY, impl.address);
|
||||
await upgradeL2(L2_GATEWAY, await impl.getAddress());
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
@@ -584,5 +618,44 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
"L2GatewayRouter.withdrawERC20 USDC after upgrade"
|
||||
);
|
||||
});
|
||||
|
||||
it.skip("should succeed on L2LidoGateway", async () => {
|
||||
const L1_WSTETH = "0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0";
|
||||
const L2_WSTETH = "0xf610A9dfB7C89644979b4A0f27063E9e7d7Cda32";
|
||||
const L1_GATEWAY = "0x6625C6332c9F91F2D27c304E729B86db87A3f504";
|
||||
const L2_GATEWAY = "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9";
|
||||
const L2LidoGateway = await ethers.getContractFactory("L2LidoGateway", deployer);
|
||||
const impl = await L2LidoGateway.deploy(L1_WSTETH, L2_WSTETH, L1_GATEWAY, L2_ROUTER, L2_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L2LidoGateway", L2_GATEWAY, deployer);
|
||||
const amountIn = ethers.parseUnits("1", 6);
|
||||
const token = await ethers.getContractAt("MockERC20", L2_WSTETH, deployer);
|
||||
await mockERC20Balance(await token.getAddress(), amountIn * 10n, 51);
|
||||
await token.approve(L2_GATEWAY, MaxUint256);
|
||||
await token.approve(L2_ROUTER, MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
await gateway["withdrawERC20(address,uint256,uint256)"](L2_WSTETH, amountIn, 1e6),
|
||||
"L2LidoGateway.withdrawERC20 wstETH before upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["withdrawERC20(address,uint256,uint256)"](L2_WSTETH, amountIn, 1e6),
|
||||
"L2GatewayRouter.withdrawERC20 wstETH before upgrade"
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL2(L2_GATEWAY, await impl.getAddress());
|
||||
await gateway.initializeV2(deployer.address, deployer.address, deployer.address, deployer.address);
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
await gateway["withdrawERC20(address,uint256,uint256)"](L2_WSTETH, amountIn, 1e6),
|
||||
"L2LidoGateway.withdrawERC20 wstETH after upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["withdrawERC20(address,uint256,uint256)"](L2_WSTETH, amountIn, 1e6),
|
||||
"L2GatewayRouter.withdrawERC20 wstETH after upgrade"
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { ethers } from "hardhat";
|
||||
import { GasSwap, ERC2771Forwarder, MockERC20, MockGasSwapTarget } from "../typechain";
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers";
|
||||
import { expect } from "chai";
|
||||
import { BigNumber, constants } from "ethers";
|
||||
import { splitSignature } from "ethers/lib/utils";
|
||||
import { MaxUint256, Signature, ZeroAddress, ZeroHash, toBigInt } from "ethers";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import { GasSwap, ERC2771Forwarder, MockERC20, MockGasSwapTarget } from "../typechain";
|
||||
|
||||
describe("GasSwap.spec", async () => {
|
||||
let deployer: SignerWithAddress;
|
||||
let signer: SignerWithAddress;
|
||||
let deployer: HardhatEthersSigner;
|
||||
let signer: HardhatEthersSigner;
|
||||
|
||||
let forwarder: ERC2771Forwarder;
|
||||
let swap: GasSwap;
|
||||
@@ -21,19 +21,15 @@ describe("GasSwap.spec", async () => {
|
||||
|
||||
const ERC2771Forwarder = await ethers.getContractFactory("ERC2771Forwarder", deployer);
|
||||
forwarder = await ERC2771Forwarder.deploy("ERC2771Forwarder");
|
||||
await forwarder.deployed();
|
||||
|
||||
const GasSwap = await ethers.getContractFactory("GasSwap", deployer);
|
||||
swap = await GasSwap.deploy(forwarder.address);
|
||||
await swap.deployed();
|
||||
swap = await GasSwap.deploy(forwarder.getAddress());
|
||||
|
||||
const MockGasSwapTarget = await ethers.getContractFactory("MockGasSwapTarget", deployer);
|
||||
target = await MockGasSwapTarget.deploy();
|
||||
await target.deployed();
|
||||
|
||||
const MockERC20 = await ethers.getContractFactory("MockERC20", deployer);
|
||||
token = await MockERC20.deploy("x", "y", 18);
|
||||
await token.deployed();
|
||||
});
|
||||
|
||||
context("auth", async () => {
|
||||
@@ -43,11 +39,11 @@ describe("GasSwap.spec", async () => {
|
||||
|
||||
context("#updateFeeRatio", async () => {
|
||||
it("should revert, when non-owner call", async () => {
|
||||
await expect(swap.connect(signer).updateFeeRatio(1)).to.revertedWith("caller is not the owner");
|
||||
await expect(swap.connect(signer).updateFeeRatio(1)).to.revertedWith("Ownable: caller is not the owner");
|
||||
});
|
||||
|
||||
it("should succeed", async () => {
|
||||
expect(await swap.feeRatio()).to.eq(constants.AddressZero);
|
||||
expect(await swap.feeRatio()).to.eq(ZeroAddress);
|
||||
await expect(swap.updateFeeRatio(100)).to.emit(swap, "UpdateFeeRatio").withArgs(100);
|
||||
expect(await swap.feeRatio()).to.eq(100);
|
||||
});
|
||||
@@ -55,66 +51,62 @@ describe("GasSwap.spec", async () => {
|
||||
|
||||
context("#updateApprovedTarget", async () => {
|
||||
it("should revert, when non-owner call", async () => {
|
||||
await expect(swap.connect(signer).updateApprovedTarget(target.address, false)).to.revertedWith(
|
||||
"caller is not the owner"
|
||||
await expect(swap.connect(signer).updateApprovedTarget(target.getAddress(), false)).to.revertedWith(
|
||||
"Ownable: caller is not the owner"
|
||||
);
|
||||
});
|
||||
|
||||
it("should succeed", async () => {
|
||||
expect(await swap.approvedTargets(target.address)).to.eq(false);
|
||||
await expect(swap.updateApprovedTarget(target.address, true))
|
||||
expect(await swap.approvedTargets(target.getAddress())).to.eq(false);
|
||||
await expect(swap.updateApprovedTarget(target.getAddress(), true))
|
||||
.to.emit(swap, "UpdateApprovedTarget")
|
||||
.withArgs(target.address, true);
|
||||
expect(await swap.approvedTargets(target.address)).to.eq(true);
|
||||
await expect(swap.updateApprovedTarget(target.address, false))
|
||||
.withArgs(await target.getAddress(), true);
|
||||
expect(await swap.approvedTargets(target.getAddress())).to.eq(true);
|
||||
await expect(swap.updateApprovedTarget(target.getAddress(), false))
|
||||
.to.emit(swap, "UpdateApprovedTarget")
|
||||
.withArgs(target.address, false);
|
||||
expect(await swap.approvedTargets(target.address)).to.eq(false);
|
||||
.withArgs(await target.getAddress(), false);
|
||||
expect(await swap.approvedTargets(target.getAddress())).to.eq(false);
|
||||
});
|
||||
});
|
||||
|
||||
context("#withdraw", async () => {
|
||||
it("should revert, when non-owner call", async () => {
|
||||
await expect(swap.connect(signer).withdraw(constants.AddressZero, 0)).to.revertedWith(
|
||||
"caller is not the owner"
|
||||
);
|
||||
await expect(swap.connect(signer).withdraw(ZeroAddress, 0)).to.revertedWith("Ownable: caller is not the owner");
|
||||
});
|
||||
|
||||
it("should succeed, when withdraw ETH", async () => {
|
||||
await deployer.sendTransaction({ to: swap.address, value: ethers.utils.parseEther("1") });
|
||||
const balanceBefore = await deployer.getBalance();
|
||||
const tx = await swap.withdraw(constants.AddressZero, ethers.utils.parseEther("1"));
|
||||
await deployer.sendTransaction({ to: swap.getAddress(), value: ethers.parseEther("1") });
|
||||
const balanceBefore = await ethers.provider.getBalance(deployer.address);
|
||||
const tx = await swap.withdraw(ZeroAddress, ethers.parseEther("1"));
|
||||
const receipt = await tx.wait();
|
||||
const balanceAfter = await deployer.getBalance();
|
||||
expect(balanceAfter.sub(balanceBefore)).to.eq(
|
||||
ethers.utils.parseEther("1").sub(receipt.gasUsed.mul(receipt.effectiveGasPrice))
|
||||
);
|
||||
const balanceAfter = await ethers.provider.getBalance(deployer.address);
|
||||
expect(balanceAfter - balanceBefore).to.eq(ethers.parseEther("1") - receipt!.gasUsed * receipt!.gasPrice);
|
||||
});
|
||||
|
||||
it("should succeed, when withdraw token", async () => {
|
||||
await token.mint(swap.address, ethers.utils.parseEther("1"));
|
||||
await token.mint(swap.getAddress(), ethers.parseEther("1"));
|
||||
const balanceBefore = await token.balanceOf(deployer.address);
|
||||
await swap.withdraw(token.address, ethers.utils.parseEther("1"));
|
||||
await swap.withdraw(token.getAddress(), ethers.parseEther("1"));
|
||||
const balanceAfter = await token.balanceOf(deployer.address);
|
||||
expect(balanceAfter.sub(balanceBefore)).to.eq(ethers.utils.parseEther("1"));
|
||||
expect(balanceAfter - balanceBefore).to.eq(ethers.parseEther("1"));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
const permit = async (amount: BigNumber) => {
|
||||
const permit = async (amount: bigint) => {
|
||||
const value = {
|
||||
owner: signer.address,
|
||||
spender: swap.address,
|
||||
spender: await swap.getAddress(),
|
||||
value: amount,
|
||||
nonce: await token.nonces(signer.address),
|
||||
deadline: constants.MaxUint256,
|
||||
deadline: MaxUint256,
|
||||
};
|
||||
|
||||
const domain = {
|
||||
name: await token.name(),
|
||||
version: "1",
|
||||
chainId: (await ethers.provider.getNetwork()).chainId,
|
||||
verifyingContract: token.address,
|
||||
verifyingContract: await token.getAddress(),
|
||||
};
|
||||
|
||||
const types = {
|
||||
@@ -142,7 +134,7 @@ describe("GasSwap.spec", async () => {
|
||||
],
|
||||
};
|
||||
|
||||
const signature = splitSignature(await signer._signTypedData(domain, types, value));
|
||||
const signature = Signature.from(await signer.signTypedData(domain, types, value));
|
||||
return signature;
|
||||
};
|
||||
|
||||
@@ -151,15 +143,15 @@ describe("GasSwap.spec", async () => {
|
||||
await expect(
|
||||
swap.swap(
|
||||
{
|
||||
token: token.address,
|
||||
token: token.getAddress(),
|
||||
value: 0,
|
||||
deadline: 0,
|
||||
r: constants.HashZero,
|
||||
s: constants.HashZero,
|
||||
r: ZeroHash,
|
||||
s: ZeroHash,
|
||||
v: 0,
|
||||
},
|
||||
{
|
||||
target: target.address,
|
||||
target: target.getAddress(),
|
||||
data: "0x",
|
||||
minOutput: 0,
|
||||
}
|
||||
@@ -168,121 +160,119 @@ describe("GasSwap.spec", async () => {
|
||||
});
|
||||
|
||||
it("should revert, when insufficient output amount", async () => {
|
||||
const amountIn = ethers.utils.parseEther("1");
|
||||
const amountOut = ethers.utils.parseEther("2");
|
||||
const amountIn = ethers.parseEther("1");
|
||||
const amountOut = ethers.parseEther("2");
|
||||
await token.mint(signer.address, amountIn);
|
||||
await deployer.sendTransaction({ to: target.address, value: amountOut });
|
||||
await deployer.sendTransaction({ to: target.getAddress(), value: amountOut });
|
||||
const signature = await permit(amountIn);
|
||||
|
||||
await target.setToken(token.address);
|
||||
await target.setToken(token.getAddress());
|
||||
await target.setAmountIn(amountIn);
|
||||
|
||||
await swap.updateApprovedTarget(target.address, true);
|
||||
await swap.updateApprovedTarget(target.getAddress(), true);
|
||||
await expect(
|
||||
swap.connect(signer).swap(
|
||||
{
|
||||
token: token.address,
|
||||
token: await token.getAddress(),
|
||||
value: amountIn,
|
||||
deadline: constants.MaxUint256,
|
||||
deadline: MaxUint256,
|
||||
r: signature.r,
|
||||
s: signature.s,
|
||||
v: signature.v,
|
||||
},
|
||||
{
|
||||
target: target.address,
|
||||
target: target.getAddress(),
|
||||
data: "0x8119c065",
|
||||
minOutput: amountOut.add(1),
|
||||
minOutput: amountOut + 1n,
|
||||
}
|
||||
)
|
||||
).to.revertedWith("insufficient output amount");
|
||||
});
|
||||
|
||||
for (const refundRatio of ["0", "1", "5"]) {
|
||||
for (const refundRatio of [0n, 1n, 5n]) {
|
||||
for (const feeRatio of ["0", "5", "50"]) {
|
||||
it(`should succeed, when swap by signer directly, with feeRatio[${feeRatio}%] refundRatio[${refundRatio}%]`, async () => {
|
||||
const amountIn = ethers.utils.parseEther("1");
|
||||
const amountOut = ethers.utils.parseEther("2");
|
||||
const amountIn = ethers.parseEther("1");
|
||||
const amountOut = ethers.parseEther("2");
|
||||
await token.mint(signer.address, amountIn);
|
||||
await deployer.sendTransaction({ to: target.address, value: amountOut });
|
||||
await deployer.sendTransaction({ to: target.getAddress(), value: amountOut });
|
||||
const signature = await permit(amountIn);
|
||||
|
||||
await target.setToken(token.address);
|
||||
await target.setToken(token.getAddress());
|
||||
await target.setAmountIn(amountIn);
|
||||
await target.setRefund(amountIn.mul(refundRatio).div(100));
|
||||
await target.setRefund((amountIn * refundRatio) / 100n);
|
||||
|
||||
await swap.updateApprovedTarget(target.address, true);
|
||||
await swap.updateFeeRatio(ethers.utils.parseEther(feeRatio).div(100));
|
||||
const fee = amountOut.mul(feeRatio).div(100);
|
||||
await swap.updateApprovedTarget(target.getAddress(), true);
|
||||
await swap.updateFeeRatio(ethers.parseEther(feeRatio) / 100n);
|
||||
const fee = (amountOut * toBigInt(feeRatio)) / 100n;
|
||||
|
||||
const balanceBefore = await signer.getBalance();
|
||||
const balanceBefore = await ethers.provider.getBalance(signer.address);
|
||||
const tx = await swap.connect(signer).swap(
|
||||
{
|
||||
token: token.address,
|
||||
token: await token.getAddress(),
|
||||
value: amountIn,
|
||||
deadline: constants.MaxUint256,
|
||||
deadline: MaxUint256,
|
||||
r: signature.r,
|
||||
s: signature.s,
|
||||
v: signature.v,
|
||||
},
|
||||
{
|
||||
target: target.address,
|
||||
target: target.getAddress(),
|
||||
data: "0x8119c065",
|
||||
minOutput: amountOut.sub(fee),
|
||||
minOutput: amountOut - fee,
|
||||
}
|
||||
);
|
||||
const receipt = await tx.wait();
|
||||
const balanceAfter = await signer.getBalance();
|
||||
expect(balanceAfter.sub(balanceBefore)).to.eq(
|
||||
amountOut.sub(fee).sub(receipt.gasUsed.mul(receipt.effectiveGasPrice))
|
||||
);
|
||||
expect(await token.balanceOf(signer.address)).to.eq(amountIn.mul(refundRatio).div(100));
|
||||
const balanceAfter = await ethers.provider.getBalance(signer.address);
|
||||
expect(balanceAfter - balanceBefore).to.eq(amountOut - fee - receipt!.gasUsed * receipt!.gasPrice);
|
||||
expect(await token.balanceOf(signer.address)).to.eq((amountIn * refundRatio) / 100n);
|
||||
});
|
||||
|
||||
it(`should succeed, when swap by signer with forwarder, with feeRatio[${feeRatio}%] refundRatio[${refundRatio}%]`, async () => {
|
||||
const amountIn = ethers.utils.parseEther("1");
|
||||
const amountOut = ethers.utils.parseEther("2");
|
||||
const amountIn = ethers.parseEther("1");
|
||||
const amountOut = ethers.parseEther("2");
|
||||
await token.mint(signer.address, amountIn);
|
||||
await deployer.sendTransaction({ to: target.address, value: amountOut });
|
||||
await deployer.sendTransaction({ to: await target.getAddress(), value: amountOut });
|
||||
const permitSignature = await permit(amountIn);
|
||||
|
||||
await target.setToken(token.address);
|
||||
await target.setToken(token.getAddress());
|
||||
await target.setAmountIn(amountIn);
|
||||
await target.setRefund(amountIn.mul(refundRatio).div(100));
|
||||
await target.setRefund((amountIn * refundRatio) / 100n);
|
||||
|
||||
await swap.updateApprovedTarget(target.address, true);
|
||||
await swap.updateFeeRatio(ethers.utils.parseEther(feeRatio).div(100));
|
||||
const fee = amountOut.mul(feeRatio).div(100);
|
||||
await swap.updateApprovedTarget(target.getAddress(), true);
|
||||
await swap.updateFeeRatio(ethers.parseEther(feeRatio) / 100n);
|
||||
const fee = (amountOut * toBigInt(feeRatio)) / 100n;
|
||||
|
||||
const reqWithoutSignature = {
|
||||
from: signer.address,
|
||||
to: swap.address,
|
||||
value: constants.Zero,
|
||||
to: await swap.getAddress(),
|
||||
value: 0n,
|
||||
gas: 1000000,
|
||||
nonce: await forwarder.nonces(signer.address),
|
||||
deadline: 2000000000,
|
||||
data: swap.interface.encodeFunctionData("swap", [
|
||||
{
|
||||
token: token.address,
|
||||
token: await token.getAddress(),
|
||||
value: amountIn,
|
||||
deadline: constants.MaxUint256,
|
||||
deadline: MaxUint256,
|
||||
r: permitSignature.r,
|
||||
s: permitSignature.s,
|
||||
v: permitSignature.v,
|
||||
},
|
||||
{
|
||||
target: target.address,
|
||||
target: await target.getAddress(),
|
||||
data: "0x8119c065",
|
||||
minOutput: amountOut.sub(fee),
|
||||
minOutput: amountOut - fee,
|
||||
},
|
||||
]),
|
||||
};
|
||||
|
||||
const signature = await signer._signTypedData(
|
||||
const signature = await signer.signTypedData(
|
||||
{
|
||||
name: "ERC2771Forwarder",
|
||||
version: "1",
|
||||
chainId: (await ethers.provider.getNetwork()).chainId,
|
||||
verifyingContract: forwarder.address,
|
||||
verifyingContract: await forwarder.getAddress(),
|
||||
},
|
||||
{
|
||||
ForwardRequest: [
|
||||
@@ -319,7 +309,7 @@ describe("GasSwap.spec", async () => {
|
||||
reqWithoutSignature
|
||||
);
|
||||
|
||||
const balanceBefore = await signer.getBalance();
|
||||
const balanceBefore = await ethers.provider.getBalance(signer.address);
|
||||
await forwarder.execute({
|
||||
from: reqWithoutSignature.from,
|
||||
to: reqWithoutSignature.to,
|
||||
@@ -329,9 +319,9 @@ describe("GasSwap.spec", async () => {
|
||||
data: reqWithoutSignature.data,
|
||||
signature,
|
||||
});
|
||||
const balanceAfter = await signer.getBalance();
|
||||
expect(balanceAfter.sub(balanceBefore)).to.eq(amountOut.sub(fee));
|
||||
expect(await token.balanceOf(signer.address)).to.eq(amountIn.mul(refundRatio).div(100));
|
||||
const balanceAfter = await ethers.provider.getBalance(signer.address);
|
||||
expect(balanceAfter - balanceBefore).to.eq(amountOut - fee);
|
||||
expect(await token.balanceOf(signer.address)).to.eq((amountIn * refundRatio) / 100n);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { expect } from "chai";
|
||||
import { BigNumber, BigNumberish, constants } from "ethers";
|
||||
import { concat, RLP } from "ethers/lib/utils";
|
||||
import { BigNumberish, ZeroHash, concat, encodeRlp, toBeHex, toBigInt } from "ethers";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import { L1BlockContainer } from "../typechain";
|
||||
|
||||
interface IImportTestConfig {
|
||||
@@ -90,7 +90,7 @@ const testcases: Array<IImportTestConfig> = [
|
||||
];
|
||||
|
||||
function encodeHeader(test: IImportTestConfig): string {
|
||||
return RLP.encode([
|
||||
return encodeRlp([
|
||||
test.parentHash,
|
||||
test.uncleHash,
|
||||
test.coinbase,
|
||||
@@ -98,15 +98,15 @@ function encodeHeader(test: IImportTestConfig): string {
|
||||
test.transactionsRoot,
|
||||
test.receiptsRoot,
|
||||
test.logsBloom,
|
||||
BigNumber.from(test.difficulty).isZero() ? "0x" : BigNumber.from(test.difficulty).toHexString(),
|
||||
BigNumber.from(test.blockHeight).toHexString(),
|
||||
BigNumber.from(test.gasLimit).toHexString(),
|
||||
BigNumber.from(test.gasUsed).toHexString(),
|
||||
BigNumber.from(test.blockTimestamp).toHexString(),
|
||||
toBigInt(test.difficulty) === 0n ? "0x" : toBeHex(test.difficulty),
|
||||
toBeHex(test.blockHeight),
|
||||
toBeHex(test.gasLimit),
|
||||
toBeHex(test.gasUsed),
|
||||
toBeHex(test.blockTimestamp),
|
||||
test.extraData,
|
||||
test.mixHash,
|
||||
test.blockNonce,
|
||||
BigNumber.from(test.baseFee).toHexString(),
|
||||
toBeHex(test.baseFee),
|
||||
]);
|
||||
}
|
||||
|
||||
@@ -124,7 +124,7 @@ describe("L1BlockContainer", async () => {
|
||||
const whitelist = await Whitelist.deploy(deployer.address);
|
||||
await whitelist.updateWhitelistStatus([deployer.address], true);
|
||||
|
||||
await container.updateWhitelist(whitelist.address);
|
||||
await container.updateWhitelist(whitelist.getAddress());
|
||||
});
|
||||
|
||||
it("should revert, when sender not allowed", async () => {
|
||||
@@ -137,7 +137,7 @@ describe("L1BlockContainer", async () => {
|
||||
test.stateRoot
|
||||
);
|
||||
|
||||
await expect(container.connect(signer).importBlockHeader(constants.HashZero, [], false)).to.revertedWith(
|
||||
await expect(container.connect(signer).importBlockHeader(ZeroHash, "0x", false)).to.revertedWith(
|
||||
"Not whitelisted sender"
|
||||
);
|
||||
});
|
||||
@@ -172,7 +172,7 @@ describe("L1BlockContainer", async () => {
|
||||
|
||||
it("should revert, when parent not imported", async () => {
|
||||
await container.initialize(
|
||||
constants.HashZero,
|
||||
ZeroHash,
|
||||
test.blockHeight - 1,
|
||||
test.blockTimestamp - 1,
|
||||
test.baseFee,
|
||||
|
||||
@@ -1,18 +1,29 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers";
|
||||
import { expect } from "chai";
|
||||
import { BigNumber, constants } from "ethers";
|
||||
import { concat, getAddress, hexlify, keccak256, randomBytes, RLP, stripZeros } from "ethers/lib/utils";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import { L1MessageQueue, L2GasPriceOracle } from "../typechain";
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import {
|
||||
MaxUint256,
|
||||
ZeroAddress,
|
||||
concat,
|
||||
encodeRlp,
|
||||
getAddress,
|
||||
hexlify,
|
||||
keccak256,
|
||||
randomBytes,
|
||||
toBeHex,
|
||||
toBigInt,
|
||||
} from "ethers";
|
||||
|
||||
describe("L1MessageQueue", async () => {
|
||||
let deployer: SignerWithAddress;
|
||||
let scrollChain: SignerWithAddress;
|
||||
let messenger: SignerWithAddress;
|
||||
let gateway: SignerWithAddress;
|
||||
let signer: SignerWithAddress;
|
||||
let deployer: HardhatEthersSigner;
|
||||
let scrollChain: HardhatEthersSigner;
|
||||
let messenger: HardhatEthersSigner;
|
||||
let gateway: HardhatEthersSigner;
|
||||
let signer: HardhatEthersSigner;
|
||||
|
||||
let oracle: L2GasPriceOracle;
|
||||
let queue: L1MessageQueue;
|
||||
@@ -21,10 +32,8 @@ describe("L1MessageQueue", async () => {
|
||||
const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
|
||||
const Factory = await ethers.getContractFactory(name, deployer);
|
||||
const impl = args.length > 0 ? await Factory.deploy(...args) : await Factory.deploy();
|
||||
await impl.deployed();
|
||||
const proxy = await TransparentUpgradeableProxy.deploy(impl.address, admin, "0x");
|
||||
await proxy.deployed();
|
||||
return proxy.address;
|
||||
const proxy = await TransparentUpgradeableProxy.deploy(impl.getAddress(), admin, "0x");
|
||||
return proxy.getAddress();
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
@@ -32,22 +41,25 @@ describe("L1MessageQueue", async () => {
|
||||
|
||||
const ProxyAdmin = await ethers.getContractFactory("ProxyAdmin", deployer);
|
||||
const admin = await ProxyAdmin.deploy();
|
||||
await admin.deployed();
|
||||
|
||||
queue = await ethers.getContractAt(
|
||||
"L1MessageQueue",
|
||||
await deployProxy("L1MessageQueue", admin.address, [messenger.address, scrollChain.address, gateway.address]),
|
||||
await deployProxy("L1MessageQueue", await admin.getAddress(), [
|
||||
messenger.address,
|
||||
scrollChain.address,
|
||||
gateway.address,
|
||||
]),
|
||||
deployer
|
||||
);
|
||||
|
||||
oracle = await ethers.getContractAt(
|
||||
"L2GasPriceOracle",
|
||||
await deployProxy("L2GasPriceOracle", admin.address, []),
|
||||
await deployProxy("L2GasPriceOracle", await admin.getAddress(), []),
|
||||
deployer
|
||||
);
|
||||
|
||||
await oracle.initialize(21000, 50000, 8, 16);
|
||||
await queue.initialize(messenger.address, scrollChain.address, constants.AddressZero, oracle.address, 10000000);
|
||||
await queue.initialize(messenger.address, scrollChain.address, ZeroAddress, oracle.getAddress(), 10000000);
|
||||
});
|
||||
|
||||
context("auth", async () => {
|
||||
@@ -56,28 +68,28 @@ describe("L1MessageQueue", async () => {
|
||||
expect(await queue.messenger()).to.eq(messenger.address);
|
||||
expect(await queue.scrollChain()).to.eq(scrollChain.address);
|
||||
expect(await queue.enforcedTxGateway()).to.eq(gateway.address);
|
||||
expect(await queue.gasOracle()).to.eq(oracle.address);
|
||||
expect(await queue.gasOracle()).to.eq(await oracle.getAddress());
|
||||
expect(await queue.maxGasLimit()).to.eq(10000000);
|
||||
});
|
||||
|
||||
it("should revert, when initialize again", async () => {
|
||||
await expect(
|
||||
queue.initialize(constants.AddressZero, constants.AddressZero, constants.AddressZero, constants.AddressZero, 0)
|
||||
).to.revertedWith("Initializable: contract is already initialized");
|
||||
await expect(queue.initialize(ZeroAddress, ZeroAddress, ZeroAddress, ZeroAddress, 0)).to.revertedWith(
|
||||
"Initializable: contract is already initialized"
|
||||
);
|
||||
});
|
||||
|
||||
context("#updateGasOracle", async () => {
|
||||
it("should revert, when non-owner call", async () => {
|
||||
await expect(queue.connect(signer).updateGasOracle(constants.AddressZero)).to.revertedWith(
|
||||
await expect(queue.connect(signer).updateGasOracle(ZeroAddress)).to.revertedWith(
|
||||
"Ownable: caller is not the owner"
|
||||
);
|
||||
});
|
||||
|
||||
it("should succeed", async () => {
|
||||
expect(await queue.gasOracle()).to.eq(oracle.address);
|
||||
expect(await queue.gasOracle()).to.eq(await oracle.getAddress());
|
||||
await expect(queue.updateGasOracle(deployer.address))
|
||||
.to.emit(queue, "UpdateGasOracle")
|
||||
.withArgs(oracle.address, deployer.address);
|
||||
.withArgs(await oracle.getAddress(), deployer.address);
|
||||
expect(await queue.gasOracle()).to.eq(deployer.address);
|
||||
});
|
||||
});
|
||||
@@ -101,30 +113,9 @@ describe("L1MessageQueue", async () => {
|
||||
const target = "0xcb18150e4efefb6786130e289a5f61a82a5b86d7";
|
||||
const transactionType = "0x7E";
|
||||
|
||||
for (const nonce of [
|
||||
BigNumber.from(0),
|
||||
BigNumber.from(1),
|
||||
BigNumber.from(127),
|
||||
BigNumber.from(128),
|
||||
BigNumber.from(22334455),
|
||||
constants.MaxUint256,
|
||||
]) {
|
||||
for (const value of [
|
||||
BigNumber.from(0),
|
||||
BigNumber.from(1),
|
||||
BigNumber.from(127),
|
||||
BigNumber.from(128),
|
||||
BigNumber.from(22334455),
|
||||
constants.MaxUint256,
|
||||
]) {
|
||||
for (const gasLimit of [
|
||||
BigNumber.from(0),
|
||||
BigNumber.from(1),
|
||||
BigNumber.from(127),
|
||||
BigNumber.from(128),
|
||||
BigNumber.from(22334455),
|
||||
constants.MaxUint256,
|
||||
]) {
|
||||
for (const nonce of [0n, 1n, 127n, 128n, 22334455n, MaxUint256]) {
|
||||
for (const value of [0n, 1n, 127n, 128n, 22334455n, MaxUint256]) {
|
||||
for (const gasLimit of [0n, 1n, 127n, 128n, 22334455n, MaxUint256]) {
|
||||
for (const dataLen of [0, 1, 2, 3, 4, 55, 56, 100]) {
|
||||
const tests = [randomBytes(dataLen)];
|
||||
if (dataLen === 1) {
|
||||
@@ -133,11 +124,11 @@ describe("L1MessageQueue", async () => {
|
||||
}
|
||||
}
|
||||
for (const data of tests) {
|
||||
const transactionPayload = RLP.encode([
|
||||
stripZeros(nonce.toHexString()),
|
||||
stripZeros(gasLimit.toHexString()),
|
||||
const transactionPayload = encodeRlp([
|
||||
nonce === 0n ? "0x" : toBeHex(nonce),
|
||||
gasLimit === 0n ? "0x" : toBeHex(gasLimit),
|
||||
target,
|
||||
stripZeros(value.toHexString()),
|
||||
value === 0n ? "0x" : toBeHex(value),
|
||||
data,
|
||||
sender,
|
||||
]);
|
||||
@@ -159,30 +150,27 @@ describe("L1MessageQueue", async () => {
|
||||
|
||||
context("#appendCrossDomainMessage", async () => {
|
||||
it("should revert, when non-messenger call", async () => {
|
||||
await expect(queue.connect(signer).appendCrossDomainMessage(constants.AddressZero, 0, "0x")).to.revertedWith(
|
||||
await expect(queue.connect(signer).appendCrossDomainMessage(ZeroAddress, 0, "0x")).to.revertedWith(
|
||||
"Only callable by the L1ScrollMessenger"
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert, when exceed maxGasLimit", async () => {
|
||||
await expect(
|
||||
queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 10000001, "0x")
|
||||
).to.revertedWith("Gas limit must not exceed maxGasLimit");
|
||||
await expect(queue.connect(messenger).appendCrossDomainMessage(ZeroAddress, 10000001, "0x")).to.revertedWith(
|
||||
"Gas limit must not exceed maxGasLimit"
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert, when below intrinsic gas", async () => {
|
||||
await expect(queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 0, "0x")).to.revertedWith(
|
||||
await expect(queue.connect(messenger).appendCrossDomainMessage(ZeroAddress, 0, "0x")).to.revertedWith(
|
||||
"Insufficient gas limit, must be above intrinsic gas"
|
||||
);
|
||||
});
|
||||
|
||||
it("should succeed", async () => {
|
||||
expect(await queue.nextCrossDomainMessageIndex()).to.eq(constants.Zero);
|
||||
expect(await queue.nextCrossDomainMessageIndex()).to.eq(0n);
|
||||
const sender = getAddress(
|
||||
BigNumber.from(messenger.address)
|
||||
.add("0x1111000000000000000000000000000000001111")
|
||||
.mod(BigNumber.from(2).pow(160))
|
||||
.toHexString()
|
||||
toBeHex((toBigInt(messenger.address) + toBigInt("0x1111000000000000000000000000000000001111")) % 2n ** 160n)
|
||||
.slice(2)
|
||||
.padStart(40, "0")
|
||||
);
|
||||
@@ -190,7 +178,7 @@ describe("L1MessageQueue", async () => {
|
||||
await expect(queue.connect(messenger).appendCrossDomainMessage(signer.address, 100000, "0x01"))
|
||||
.to.emit(queue, "QueueTransaction")
|
||||
.withArgs(sender, signer.address, 0, 0, 100000, "0x01");
|
||||
expect(await queue.nextCrossDomainMessageIndex()).to.eq(constants.One);
|
||||
expect(await queue.nextCrossDomainMessageIndex()).to.eq(1n);
|
||||
expect(await queue.getCrossDomainMessage(0)).to.eq(hash);
|
||||
});
|
||||
});
|
||||
@@ -198,30 +186,30 @@ describe("L1MessageQueue", async () => {
|
||||
context("#appendEnforcedTransaction", async () => {
|
||||
it("should revert, when non-gateway call", async () => {
|
||||
await expect(
|
||||
queue.connect(signer).appendEnforcedTransaction(signer.address, constants.AddressZero, 0, 0, "0x")
|
||||
queue.connect(signer).appendEnforcedTransaction(signer.address, ZeroAddress, 0, 0, "0x")
|
||||
).to.revertedWith("Only callable by the EnforcedTxGateway");
|
||||
});
|
||||
|
||||
it("should revert, when sender is not EOA", async () => {
|
||||
await expect(
|
||||
queue.connect(gateway).appendEnforcedTransaction(queue.address, constants.AddressZero, 0, 0, "0x")
|
||||
queue.connect(gateway).appendEnforcedTransaction(queue.getAddress(), ZeroAddress, 0, 0, "0x")
|
||||
).to.revertedWith("only EOA");
|
||||
});
|
||||
|
||||
it("should revert, when exceed maxGasLimit", async () => {
|
||||
await expect(
|
||||
queue.connect(gateway).appendEnforcedTransaction(signer.address, constants.AddressZero, 0, 10000001, "0x")
|
||||
queue.connect(gateway).appendEnforcedTransaction(signer.address, ZeroAddress, 0, 10000001, "0x")
|
||||
).to.revertedWith("Gas limit must not exceed maxGasLimit");
|
||||
});
|
||||
|
||||
it("should revert, when below intrinsic gas", async () => {
|
||||
await expect(
|
||||
queue.connect(gateway).appendEnforcedTransaction(signer.address, constants.AddressZero, 0, 0, "0x")
|
||||
queue.connect(gateway).appendEnforcedTransaction(signer.address, ZeroAddress, 0, 0, "0x")
|
||||
).to.revertedWith("Insufficient gas limit, must be above intrinsic gas");
|
||||
});
|
||||
|
||||
it("should succeed", async () => {
|
||||
expect(await queue.nextCrossDomainMessageIndex()).to.eq(constants.Zero);
|
||||
expect(await queue.nextCrossDomainMessageIndex()).to.eq(0n);
|
||||
const sender = signer.address;
|
||||
const hash = await queue.computeTransactionHash(sender, 0, 200, signer.address, 100000, "0x01");
|
||||
await expect(
|
||||
@@ -229,7 +217,7 @@ describe("L1MessageQueue", async () => {
|
||||
)
|
||||
.to.emit(queue, "QueueTransaction")
|
||||
.withArgs(sender, signer.address, 200, 0, 100000, "0x01");
|
||||
expect(await queue.nextCrossDomainMessageIndex()).to.eq(constants.One);
|
||||
expect(await queue.nextCrossDomainMessageIndex()).to.eq(1n);
|
||||
expect(await queue.getCrossDomainMessage(0)).to.eq(hash);
|
||||
});
|
||||
});
|
||||
@@ -254,7 +242,7 @@ describe("L1MessageQueue", async () => {
|
||||
it("should succeed", async () => {
|
||||
// append 512 messages
|
||||
for (let i = 0; i < 256 * 2; i++) {
|
||||
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
|
||||
await queue.connect(messenger).appendCrossDomainMessage(ZeroAddress, 1000000, "0x");
|
||||
}
|
||||
|
||||
// pop 50 messages with no skip
|
||||
@@ -292,17 +280,12 @@ describe("L1MessageQueue", async () => {
|
||||
}
|
||||
|
||||
// pop 256 messages with random skip
|
||||
const bitmap = BigNumber.from("0x496525059c3f33758d17030403e45afe067b8a0ae1317cda0487fd2932cbea1a");
|
||||
const bitmap = toBigInt("0x496525059c3f33758d17030403e45afe067b8a0ae1317cda0487fd2932cbea1a");
|
||||
const tx = await queue.connect(scrollChain).popCrossDomainMessage(80, 256, bitmap);
|
||||
await expect(tx).to.emit(queue, "DequeueTransaction").withArgs(80, 256, bitmap);
|
||||
console.log("gas used:", (await tx.wait()).gasUsed.toString());
|
||||
console.log("gas used:", (await tx.wait())!.gasUsed.toString());
|
||||
for (let i = 80; i < 80 + 256; i++) {
|
||||
expect(await queue.isMessageSkipped(i)).to.eq(
|
||||
bitmap
|
||||
.shr(i - 80)
|
||||
.and(1)
|
||||
.eq(1)
|
||||
);
|
||||
expect(await queue.isMessageSkipped(i)).to.eq(((bitmap >> toBigInt(i - 80)) & 1n) === 1n);
|
||||
expect(await queue.isMessageDropped(i)).to.eq(false);
|
||||
}
|
||||
});
|
||||
@@ -314,39 +297,39 @@ describe("L1MessageQueue", async () => {
|
||||
it.skip(`should succeed on random tests, pop three times each with ${count1} ${count2} ${count3} msgs`, async () => {
|
||||
// append count1 + count2 + count3 messages
|
||||
for (let i = 0; i < count1 + count2 + count3; i++) {
|
||||
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
|
||||
await queue.connect(messenger).appendCrossDomainMessage(ZeroAddress, 1000000, "0x");
|
||||
}
|
||||
|
||||
// first pop `count1` messages
|
||||
const bitmap1 = BigNumber.from(randomBytes(32));
|
||||
const bitmap1 = toBigInt(randomBytes(32));
|
||||
let tx = await queue.connect(scrollChain).popCrossDomainMessage(0, count1, bitmap1);
|
||||
await expect(tx)
|
||||
.to.emit(queue, "DequeueTransaction")
|
||||
.withArgs(0, count1, bitmap1.and(constants.One.shl(count1).sub(1)));
|
||||
.withArgs(0, count1, bitmap1 & ((1n << toBigInt(count1)) - 1n));
|
||||
for (let i = 0; i < count1; i++) {
|
||||
expect(await queue.isMessageSkipped(i)).to.eq(bitmap1.shr(i).and(1).eq(1));
|
||||
expect(await queue.isMessageSkipped(i)).to.eq(((bitmap1 >> toBigInt(i)) & 1n) === 1n);
|
||||
expect(await queue.isMessageDropped(i)).to.eq(false);
|
||||
}
|
||||
|
||||
// then pop `count2` messages
|
||||
const bitmap2 = BigNumber.from(randomBytes(32));
|
||||
const bitmap2 = toBigInt(randomBytes(32));
|
||||
tx = await queue.connect(scrollChain).popCrossDomainMessage(count1, count2, bitmap2);
|
||||
await expect(tx)
|
||||
.to.emit(queue, "DequeueTransaction")
|
||||
.withArgs(count1, count2, bitmap2.and(constants.One.shl(count2).sub(1)));
|
||||
.withArgs(count1, count2, bitmap2 & ((1n << toBigInt(count2)) - 1n));
|
||||
for (let i = 0; i < count2; i++) {
|
||||
expect(await queue.isMessageSkipped(i + count1)).to.eq(bitmap2.shr(i).and(1).eq(1));
|
||||
expect(await queue.isMessageSkipped(i + count1)).to.eq(((bitmap2 >> toBigInt(i)) & 1n) === 1n);
|
||||
expect(await queue.isMessageDropped(i + count1)).to.eq(false);
|
||||
}
|
||||
|
||||
// last pop `count3` messages
|
||||
const bitmap3 = BigNumber.from(randomBytes(32));
|
||||
const bitmap3 = toBigInt(randomBytes(32));
|
||||
tx = await queue.connect(scrollChain).popCrossDomainMessage(count1 + count2, count3, bitmap3);
|
||||
await expect(tx)
|
||||
.to.emit(queue, "DequeueTransaction")
|
||||
.withArgs(count1 + count2, count3, bitmap3.and(constants.One.shl(count3).sub(1)));
|
||||
.withArgs(count1 + count2, count3, bitmap3 & ((1n << toBigInt(count3)) - 1n));
|
||||
for (let i = 0; i < count3; i++) {
|
||||
expect(await queue.isMessageSkipped(i + count1 + count2)).to.eq(bitmap3.shr(i).and(1).eq(1));
|
||||
expect(await queue.isMessageSkipped(i + count1 + count2)).to.eq(((bitmap3 >> toBigInt(i)) & 1n) === 1n);
|
||||
expect(await queue.isMessageDropped(i + count1 + count2)).to.eq(false);
|
||||
}
|
||||
});
|
||||
@@ -365,7 +348,7 @@ describe("L1MessageQueue", async () => {
|
||||
it("should revert, when drop non-skipped message", async () => {
|
||||
// append 10 messages
|
||||
for (let i = 0; i < 10; i++) {
|
||||
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
|
||||
await queue.connect(messenger).appendCrossDomainMessage(ZeroAddress, 1000000, "0x");
|
||||
}
|
||||
// pop 5 messages with no skip
|
||||
await expect(queue.connect(scrollChain).popCrossDomainMessage(0, 5, 0))
|
||||
@@ -390,7 +373,7 @@ describe("L1MessageQueue", async () => {
|
||||
it("should succeed", async () => {
|
||||
// append 10 messages
|
||||
for (let i = 0; i < 10; i++) {
|
||||
await queue.connect(messenger).appendCrossDomainMessage(constants.AddressZero, 1000000, "0x");
|
||||
await queue.connect(messenger).appendCrossDomainMessage(ZeroAddress, 1000000, "0x");
|
||||
}
|
||||
// pop 10 messages, all skipped
|
||||
await expect(queue.connect(scrollChain).popCrossDomainMessage(0, 10, 0x3ff))
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { expect } from "chai";
|
||||
import { concat } from "ethers/lib/utils";
|
||||
import { concat } from "ethers";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import { MockPatriciaMerkleTrieVerifier } from "../typechain";
|
||||
|
||||
interface ITestConfig {
|
||||
@@ -121,7 +122,6 @@ describe("PatriciaMerkleTrieVerifier", async () => {
|
||||
|
||||
const MockPatriciaMerkleTrieVerifier = await ethers.getContractFactory("MockPatriciaMerkleTrieVerifier", deployer);
|
||||
verifier = await MockPatriciaMerkleTrieVerifier.deploy();
|
||||
await verifier.deployed();
|
||||
});
|
||||
|
||||
for (const test of testcases) {
|
||||
|
||||
84
contracts/integration-test/PoseidonHash.spec.ts
Normal file
84
contracts/integration-test/PoseidonHash.spec.ts
Normal file
@@ -0,0 +1,84 @@
|
||||
/* eslint-disable node/no-missing-import */
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
import { expect } from "chai";
|
||||
import { randomBytes } from "crypto";
|
||||
import { Contract, toBigInt } from "ethers";
|
||||
import fs from "fs";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import PoseidonWithoutDomain from "circomlib/src/poseidon_gencontract";
|
||||
import { generateABI, createCode } from "../scripts/poseidon";
|
||||
|
||||
describe("PoseidonHash.spec", async () => {
|
||||
// test against with circomlib's implementation.
|
||||
context("domain = zero", async () => {
|
||||
let poseidonCircom: Contract;
|
||||
let poseidon: Contract;
|
||||
|
||||
beforeEach(async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
|
||||
const PoseidonWithoutDomainFactory = new ethers.ContractFactory(
|
||||
PoseidonWithoutDomain.generateABI(2),
|
||||
PoseidonWithoutDomain.createCode(2),
|
||||
deployer
|
||||
);
|
||||
poseidonCircom = (await PoseidonWithoutDomainFactory.deploy()) as Contract;
|
||||
|
||||
const PoseidonWithDomainFactory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
|
||||
poseidon = (await PoseidonWithDomainFactory.deploy()) as Contract;
|
||||
});
|
||||
|
||||
it("should succeed on zero inputs", async () => {
|
||||
expect(await poseidonCircom["poseidon(uint256[2])"]([0, 0])).to.eq(
|
||||
await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 0)
|
||||
);
|
||||
});
|
||||
|
||||
it("should succeed on random inputs", async () => {
|
||||
for (let bytes = 1; bytes <= 32; ++bytes) {
|
||||
for (let i = 0; i < 5; ++i) {
|
||||
const a = toBigInt(randomBytes(bytes));
|
||||
const b = toBigInt(randomBytes(bytes));
|
||||
expect(await poseidonCircom["poseidon(uint256[2])"]([a, b])).to.eq(
|
||||
await poseidon["poseidon(uint256[2],uint256)"]([a, b], 0)
|
||||
);
|
||||
expect(await poseidonCircom["poseidon(uint256[2])"]([a, 0])).to.eq(
|
||||
await poseidon["poseidon(uint256[2],uint256)"]([a, 0], 0)
|
||||
);
|
||||
expect(await poseidonCircom["poseidon(uint256[2])"]([0, b])).to.eq(
|
||||
await poseidon["poseidon(uint256[2],uint256)"]([0, b], 0)
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// test against with scroll's go implementation.
|
||||
context("domain = nonzero", async () => {
|
||||
let poseidon: Contract;
|
||||
|
||||
beforeEach(async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
const PoseidonWithDomainFactory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
|
||||
poseidon = (await PoseidonWithDomainFactory.deploy()) as Contract;
|
||||
});
|
||||
|
||||
it("should succeed on zero inputs", async () => {
|
||||
expect(await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 6)).to.eq(
|
||||
toBigInt("17848312925884193353134534408113064827548730776291701343555436351962284922129")
|
||||
);
|
||||
expect(await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 7)).to.eq(
|
||||
toBigInt("20994231331856095272861976502721128670019193481895476667943874333621461724676")
|
||||
);
|
||||
});
|
||||
|
||||
it("should succeed on random inputs", async () => {
|
||||
const lines = String(fs.readFileSync("./integration-test/testdata/poseidon_hash_with_domain.data")).split("\n");
|
||||
for (const line of lines) {
|
||||
const [domain, a, b, hash] = line.split(" ");
|
||||
expect(await poseidon["poseidon(uint256[2],uint256)"]([a, b], domain)).to.eq(toBigInt(hash));
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
162
contracts/integration-test/ScrollChain.blob.spec.ts
Normal file
162
contracts/integration-test/ScrollChain.blob.spec.ts
Normal file
@@ -0,0 +1,162 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { ZeroAddress } from "ethers";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import { ScrollChain, L1MessageQueue } from "../typechain";
|
||||
import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers";
|
||||
import { randomBytes } from "crypto";
|
||||
import { expect } from "chai";
|
||||
|
||||
describe("ScrollChain.blob", async () => {
|
||||
let deployer: HardhatEthersSigner;
|
||||
let signer: HardhatEthersSigner;
|
||||
|
||||
let queue: L1MessageQueue;
|
||||
let chain: ScrollChain;
|
||||
|
||||
beforeEach(async () => {
|
||||
[deployer, signer] = await ethers.getSigners();
|
||||
|
||||
const EmptyContract = await ethers.getContractFactory("EmptyContract", deployer);
|
||||
const empty = await EmptyContract.deploy();
|
||||
|
||||
const ProxyAdmin = await ethers.getContractFactory("ProxyAdmin", deployer);
|
||||
const admin = await ProxyAdmin.deploy();
|
||||
|
||||
const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
|
||||
const queueProxy = await TransparentUpgradeableProxy.deploy(empty.getAddress(), admin.getAddress(), "0x");
|
||||
const chainProxy = await TransparentUpgradeableProxy.deploy(empty.getAddress(), admin.getAddress(), "0x");
|
||||
|
||||
const L1MessageQueue = await ethers.getContractFactory("L1MessageQueue", deployer);
|
||||
const queueImpl = await L1MessageQueue.deploy(deployer.address, chainProxy.getAddress(), deployer.address);
|
||||
await admin.upgrade(queueProxy.getAddress(), queueImpl.getAddress());
|
||||
|
||||
const ScrollChain = await ethers.getContractFactory("ScrollChain", deployer);
|
||||
const chainImpl = await ScrollChain.deploy(0, queueProxy.getAddress(), deployer.address);
|
||||
await admin.upgrade(chainProxy.getAddress(), chainImpl.getAddress());
|
||||
|
||||
queue = await ethers.getContractAt("L1MessageQueue", await queueProxy.getAddress(), deployer);
|
||||
chain = await ethers.getContractAt("ScrollChain", await chainProxy.getAddress(), deployer);
|
||||
|
||||
await chain.initialize(queue.getAddress(), ZeroAddress, 100);
|
||||
await chain.addSequencer(deployer.address);
|
||||
await chain.addProver(deployer.address);
|
||||
await queue.initialize(deployer.address, chain.getAddress(), deployer.address, deployer.address, 10000000);
|
||||
});
|
||||
|
||||
context("commit batch", async () => {
|
||||
let batchHeader0: Uint8Array;
|
||||
|
||||
beforeEach(async () => {
|
||||
// import 10 L1 messages
|
||||
for (let i = 0; i < 10; i++) {
|
||||
queue.appendCrossDomainMessage(deployer.address, 1000000, "0x");
|
||||
}
|
||||
|
||||
// import genesis batch first
|
||||
batchHeader0 = new Uint8Array(89);
|
||||
batchHeader0[25] = 1;
|
||||
await chain.importGenesisBatch(batchHeader0, randomBytes(32));
|
||||
});
|
||||
|
||||
it("should revert when caller is not sequencer", async () => {
|
||||
await expect(chain.connect(signer).commitBatch(1, batchHeader0, [], "0x")).to.revertedWithCustomError(
|
||||
chain,
|
||||
"ErrorCallerIsNotSequencer"
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert when batch is empty", async () => {
|
||||
await expect(chain.commitBatch(1, batchHeader0, [], "0x")).to.revertedWithCustomError(chain, "ErrorBatchIsEmpty");
|
||||
});
|
||||
|
||||
it("should revert when batch header length too small", async () => {
|
||||
const header = new Uint8Array(120);
|
||||
header[0] = 1;
|
||||
await expect(chain.commitBatch(1, header, ["0x"], "0x")).to.revertedWithCustomError(
|
||||
chain,
|
||||
"ErrorBatchHeaderLengthTooSmall"
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert when wrong bitmap length", async () => {
|
||||
const header = new Uint8Array(122);
|
||||
header[0] = 1;
|
||||
await expect(chain.commitBatch(1, header, ["0x"], "0x")).to.revertedWithCustomError(
|
||||
chain,
|
||||
"ErrorIncorrectBitmapLength"
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert when incorrect parent batch hash", async () => {
|
||||
batchHeader0[25] = 2;
|
||||
await expect(chain.commitBatch(1, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
|
||||
chain,
|
||||
"ErrorIncorrectBatchHash"
|
||||
);
|
||||
batchHeader0[25] = 1;
|
||||
});
|
||||
|
||||
it("should revert when ErrorInvalidBatchHeaderVersion", async () => {
|
||||
const header = new Uint8Array(121);
|
||||
header[0] = 2;
|
||||
await expect(chain.commitBatch(1, header, ["0x"], "0x")).to.revertedWithCustomError(
|
||||
chain,
|
||||
"ErrorInvalidBatchHeaderVersion"
|
||||
);
|
||||
await expect(chain.commitBatch(2, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
|
||||
chain,
|
||||
"ErrorInvalidBatchHeaderVersion"
|
||||
);
|
||||
});
|
||||
|
||||
it("should revert when ErrorNoBlobFound", async () => {
|
||||
await expect(chain.commitBatch(1, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
|
||||
chain,
|
||||
"ErrorNoBlobFound"
|
||||
);
|
||||
});
|
||||
|
||||
/* Hardhat doesn't have support for EIP4844 yet.
|
||||
const makeTransaction = async (data: string, value: bigint, blobVersionedHashes: Array<string>) => {
|
||||
const tx = new Transaction();
|
||||
tx.type = 3;
|
||||
tx.to = await chain.getAddress();
|
||||
tx.data = data;
|
||||
tx.nonce = await deployer.getNonce();
|
||||
tx.gasLimit = 1000000;
|
||||
tx.maxPriorityFeePerGas = (await ethers.provider.getFeeData()).maxPriorityFeePerGas;
|
||||
tx.maxFeePerGas = (await ethers.provider.getFeeData()).maxFeePerGas;
|
||||
tx.value = value;
|
||||
tx.chainId = (await ethers.provider.getNetwork()).chainId;
|
||||
tx.maxFeePerBlobGas = ethers.parseUnits("1", "gwei");
|
||||
tx.blobVersionedHashes = blobVersionedHashes;
|
||||
return tx;
|
||||
};
|
||||
|
||||
it("should revert when ErrorFoundMultipleBlob", async () => {
|
||||
const data = chain.interface.encodeFunctionData("commitBatch", [1, batchHeader0, ["0x"], "0x"]);
|
||||
const tx = await makeTransaction(data, 0n, [ZeroHash, ZeroHash]);
|
||||
const signature = await deployer.signMessage(tx.unsignedHash);
|
||||
tx.signature = Signature.from(signature);
|
||||
const r = await ethers.provider.broadcastTransaction(tx.serialized);
|
||||
await expect(r).to.revertedWithCustomError(chain, "ErrorFoundMultipleBlob");
|
||||
});
|
||||
|
||||
it("should revert when ErrorNoBlockInChunk", async () => {});
|
||||
|
||||
it("should revert when ErrorIncorrectChunkLength", async () => {});
|
||||
|
||||
it("should revert when ErrorLastL1MessageSkipped", async () => {});
|
||||
|
||||
it("should revert when ErrorNumTxsLessThanNumL1Msgs", async () => {});
|
||||
|
||||
it("should revert when ErrorTooManyTxsInOneChunk", async () => {});
|
||||
|
||||
it("should revert when ErrorIncorrectBitmapLength", async () => {});
|
||||
|
||||
it("should succeed", async () => {});
|
||||
*/
|
||||
});
|
||||
});
|
||||
@@ -1,8 +1,8 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { concat } from "ethers/lib/utils";
|
||||
import { constants } from "ethers";
|
||||
import { ZeroAddress, concat, getBytes } from "ethers";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import { ScrollChain, L1MessageQueue } from "../typechain";
|
||||
|
||||
describe("ScrollChain", async () => {
|
||||
@@ -14,40 +14,28 @@ describe("ScrollChain", async () => {
|
||||
|
||||
const EmptyContract = await ethers.getContractFactory("EmptyContract", deployer);
|
||||
const empty = await EmptyContract.deploy();
|
||||
await empty.deployed();
|
||||
|
||||
const ProxyAdmin = await ethers.getContractFactory("ProxyAdmin", deployer);
|
||||
const admin = await ProxyAdmin.deploy();
|
||||
await admin.deployed();
|
||||
|
||||
const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
|
||||
const queueProxy = await TransparentUpgradeableProxy.deploy(empty.address, admin.address, "0x");
|
||||
await queueProxy.deployed();
|
||||
const chainProxy = await TransparentUpgradeableProxy.deploy(empty.address, admin.address, "0x");
|
||||
await chainProxy.deployed();
|
||||
const queueProxy = await TransparentUpgradeableProxy.deploy(empty.getAddress(), admin.getAddress(), "0x");
|
||||
const chainProxy = await TransparentUpgradeableProxy.deploy(empty.getAddress(), admin.getAddress(), "0x");
|
||||
|
||||
const L1MessageQueue = await ethers.getContractFactory("L1MessageQueue", deployer);
|
||||
const queueImpl = await L1MessageQueue.deploy(constants.AddressZero, chainProxy.address, deployer.address);
|
||||
await queueImpl.deployed();
|
||||
await admin.upgrade(queueProxy.address, queueImpl.address);
|
||||
const queueImpl = await L1MessageQueue.deploy(ZeroAddress, chainProxy.getAddress(), deployer.address);
|
||||
await admin.upgrade(queueProxy.getAddress(), queueImpl.getAddress());
|
||||
|
||||
const ScrollChain = await ethers.getContractFactory("ScrollChain", deployer);
|
||||
const chainImpl = await ScrollChain.deploy(0, queueProxy.address, deployer.address);
|
||||
await chainImpl.deployed();
|
||||
await admin.upgrade(chainProxy.address, chainImpl.address);
|
||||
const chainImpl = await ScrollChain.deploy(0, queueProxy.getAddress(), deployer.address);
|
||||
await admin.upgrade(chainProxy.getAddress(), chainImpl.getAddress());
|
||||
|
||||
queue = await ethers.getContractAt("L1MessageQueue", queueProxy.address, deployer);
|
||||
chain = await ethers.getContractAt("ScrollChain", chainProxy.address, deployer);
|
||||
queue = await ethers.getContractAt("L1MessageQueue", await queueProxy.getAddress(), deployer);
|
||||
chain = await ethers.getContractAt("ScrollChain", await chainProxy.getAddress(), deployer);
|
||||
|
||||
await chain.initialize(queue.address, constants.AddressZero, 100);
|
||||
await chain.initialize(queue.getAddress(), ZeroAddress, 100);
|
||||
await chain.addSequencer(deployer.address);
|
||||
await queue.initialize(
|
||||
constants.AddressZero,
|
||||
chain.address,
|
||||
constants.AddressZero,
|
||||
constants.AddressZero,
|
||||
10000000
|
||||
);
|
||||
await queue.initialize(ZeroAddress, chain.getAddress(), ZeroAddress, ZeroAddress, 10000000);
|
||||
});
|
||||
|
||||
// @note skip this benchmark tests
|
||||
@@ -82,12 +70,12 @@ describe("ScrollChain", async () => {
|
||||
for (let i = 0; i < numChunks; i++) {
|
||||
const txsInChunk: Array<Uint8Array> = [];
|
||||
for (let j = 0; j < numBlocks; j++) {
|
||||
txsInChunk.push(concat(txs));
|
||||
txsInChunk.push(getBytes(concat(txs)));
|
||||
}
|
||||
chunks.push(concat([chunk, concat(txsInChunk)]));
|
||||
chunks.push(getBytes(concat([chunk, concat(txsInChunk)])));
|
||||
}
|
||||
|
||||
const estimateGas = await chain.estimateGas.commitBatch(0, batchHeader0, chunks, "0x");
|
||||
const estimateGas = await chain.commitBatch.estimateGas(0, batchHeader0, chunks, "0x");
|
||||
console.log(
|
||||
`${numChunks}`,
|
||||
`${numBlocks}`,
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers";
|
||||
import { expect } from "chai";
|
||||
import { hexlify } from "ethers/lib/utils";
|
||||
import { ethers } from "hardhat";
|
||||
import { ZkEvmVerifierV1 } from "../typechain";
|
||||
import { SignerWithAddress } from "@nomiclabs/hardhat-ethers/signers";
|
||||
import { hexlify } from "ethers";
|
||||
import fs from "fs";
|
||||
import { ethers } from "hardhat";
|
||||
|
||||
import { ZkEvmVerifierV1 } from "../typechain";
|
||||
|
||||
describe("ZkEvmVerifierV1", async () => {
|
||||
let deployer: SignerWithAddress;
|
||||
let deployer: HardhatEthersSigner;
|
||||
|
||||
let zkEvmVerifier: ZkEvmVerifierV1;
|
||||
|
||||
@@ -20,8 +21,7 @@ describe("ZkEvmVerifierV1", async () => {
|
||||
const receipt = await tx.wait();
|
||||
|
||||
const ZkEvmVerifierV1 = await ethers.getContractFactory("ZkEvmVerifierV1", deployer);
|
||||
zkEvmVerifier = await ZkEvmVerifierV1.deploy(receipt.contractAddress);
|
||||
await zkEvmVerifier.deployed();
|
||||
zkEvmVerifier = await ZkEvmVerifierV1.deploy(receipt!.contractAddress!);
|
||||
});
|
||||
|
||||
it("should succeed", async () => {
|
||||
@@ -37,7 +37,7 @@ describe("ZkEvmVerifierV1", async () => {
|
||||
|
||||
// verify ok
|
||||
await zkEvmVerifier.verify(proof, publicInputHash);
|
||||
console.log("Gas Usage:", (await zkEvmVerifier.estimateGas.verify(proof, publicInputHash)).toString());
|
||||
console.log("Gas Usage:", (await zkEvmVerifier.verify.estimateGas(proof, publicInputHash)).toString());
|
||||
|
||||
// verify failed
|
||||
await expect(zkEvmVerifier.verify(proof, publicInputHash.reverse())).to.reverted;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user