mirror of
https://github.com/scroll-tech/scroll.git
synced 2026-01-12 07:28:08 -05:00
Compare commits
13 Commits
v4.3.53
...
fix-rollup
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b621cffd58 | ||
|
|
bab1841193 | ||
|
|
ed99ac8569 | ||
|
|
d3ec65fd8b | ||
|
|
7e958d6e9a | ||
|
|
670e848b2c | ||
|
|
c6e8fcb2d3 | ||
|
|
c68f4283b1 | ||
|
|
97745fc4d0 | ||
|
|
44c5f1c8b4 | ||
|
|
9bb48689ec | ||
|
|
352aea4e70 | ||
|
|
69224ebb93 |
249
.github/workflows/docker.yml
vendored
249
.github/workflows/docker.yml
vendored
@@ -5,6 +5,9 @@ on:
|
||||
tags:
|
||||
- v**
|
||||
|
||||
env:
|
||||
AWS_REGION: us-west-2
|
||||
|
||||
jobs:
|
||||
event_watcher:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -29,6 +32,29 @@ jobs:
|
||||
scrolltech/event-watcher:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
|
||||
# build and push to aws ecr
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
|
||||
- name: Build, tag, and push image to Amazon ECR
|
||||
id: build-image
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
ECR_REPOSITORY: event-watcher
|
||||
IMAGE_TAG: ${{github.ref_name}}
|
||||
run: |
|
||||
# Build a docker container and push it to ECR
|
||||
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
|
||||
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
|
||||
gas_oracle:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -52,6 +78,30 @@ jobs:
|
||||
scrolltech/gas-oracle:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
|
||||
# build and push to aws ecr
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
|
||||
|
||||
- name: Build, tag, and push image to Amazon ECR
|
||||
id: build-image
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
ECR_REPOSITORY: gas-oracle
|
||||
IMAGE_TAG: ${{github.ref_name}}
|
||||
run: |
|
||||
# Build a docker container and push it to ECR
|
||||
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
|
||||
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
|
||||
rollup_relayer:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -75,52 +125,124 @@ jobs:
|
||||
scrolltech/rollup-relayer:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
|
||||
# build and push to aws ecr
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
|
||||
|
||||
- name: Build, tag, and push image to Amazon ECR
|
||||
id: build-image
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
ECR_REPOSITORY: rollup-relayer
|
||||
IMAGE_TAG: ${{github.ref_name}}
|
||||
run: |
|
||||
# Build a docker container and push it to ECR
|
||||
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
|
||||
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
|
||||
bridgehistoryapi-fetcher:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push bridgehistoryapi-fetcher docker
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/bridgehistoryapi-fetcher:${{github.ref_name}}
|
||||
scrolltech/bridgehistoryapi-fetcher:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push bridgehistoryapi-fetcher docker
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/bridgehistoryapi-fetcher:${{github.ref_name}}
|
||||
scrolltech/bridgehistoryapi-fetcher:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
|
||||
# build and push to aws ecr
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
|
||||
|
||||
- name: Build, tag, and push image to Amazon ECR
|
||||
id: build-image
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
ECR_REPOSITORY: bridgehistoryapi-fetcher
|
||||
IMAGE_TAG: ${{github.ref_name}}
|
||||
run: |
|
||||
# Build a docker container and push it to ECR
|
||||
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
|
||||
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
|
||||
bridgehistoryapi-api:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push bridgehistoryapi-api docker
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/bridgehistoryapi-api:${{github.ref_name}}
|
||||
scrolltech/bridgehistoryapi-api:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push bridgehistoryapi-api docker
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
scrolltech/bridgehistoryapi-api:${{github.ref_name}}
|
||||
scrolltech/bridgehistoryapi-api:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
|
||||
# build and push to aws ecr
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
|
||||
|
||||
- name: Build, tag, and push image to Amazon ECR
|
||||
id: build-image
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
ECR_REPOSITORY: bridgehistoryapi-api
|
||||
IMAGE_TAG: ${{github.ref_name}}
|
||||
run: |
|
||||
# Build a docker container and push it to ECR
|
||||
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
|
||||
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
|
||||
coordinator-api:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -144,6 +266,30 @@ jobs:
|
||||
scrolltech/coordinator-api:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
|
||||
# build and push to aws ecr
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
|
||||
|
||||
- name: Build, tag, and push image to Amazon ECR
|
||||
id: build-image
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
ECR_REPOSITORY: coordinator-api
|
||||
IMAGE_TAG: ${{github.ref_name}}
|
||||
run: |
|
||||
# Build a docker container and push it to ECR
|
||||
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
|
||||
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
|
||||
coordinator-cron:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -167,3 +313,26 @@ jobs:
|
||||
scrolltech/coordinator-cron:latest
|
||||
# cache-from: type=gha,scope=${{ github.workflow }}
|
||||
# cache-to: type=gha,scope=${{ github.workflow }}
|
||||
|
||||
# build and push to aws ecr
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ env.AWS_REGION }}
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
|
||||
|
||||
- name: Build, tag, and push image to Amazon ECR
|
||||
id: build-image
|
||||
env:
|
||||
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
|
||||
ECR_REPOSITORY: coordinator-cron
|
||||
IMAGE_TAG: ${{github.ref_name}}
|
||||
run: |
|
||||
# Build a docker container and push it to ECR
|
||||
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
|
||||
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
|
||||
|
||||
2
LICENSE
2
LICENSE
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2022-2023 Scroll
|
||||
Copyright (c) 2022-2024 Scroll
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
@@ -3,8 +3,8 @@
|
||||
"confirmation": 0,
|
||||
"endpoint": "https://rpc.ankr.com/eth",
|
||||
"startHeight": 18306000,
|
||||
"blockTime": 10,
|
||||
"fetchLimit": 30,
|
||||
"blockTime": 12,
|
||||
"fetchLimit": 16,
|
||||
"MessengerAddr": "0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367",
|
||||
"ETHGatewayAddr": "0x7F2b8C31F88B6006c382775eea88297Ec1e3E905",
|
||||
"WETHGatewayAddr": "0x7AC440cAe8EB6328de4fA621163a792c1EA9D4fE",
|
||||
@@ -17,13 +17,14 @@
|
||||
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
|
||||
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
|
||||
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
|
||||
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
|
||||
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
|
||||
"bypassReorgDetection": false
|
||||
},
|
||||
"L2": {
|
||||
"confirmation": 0,
|
||||
"endpoint": "https://rpc.scroll.io",
|
||||
"blockTime": 3,
|
||||
"fetchLimit": 100,
|
||||
"fetchLimit": 64,
|
||||
"MessengerAddr": "0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC",
|
||||
"ETHGatewayAddr": "0x6EA73e05AdC79974B931123675ea8F78FfdacDF0",
|
||||
"WETHGatewayAddr": "0x7003E7B7186f0E6601203b99F7B8DECBfA391cf9",
|
||||
|
||||
@@ -6,7 +6,6 @@ require (
|
||||
github.com/gin-contrib/cors v1.5.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/go-redis/redis/v8 v8.11.5
|
||||
github.com/google/uuid v1.4.0
|
||||
github.com/pressly/goose/v3 v3.16.0
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
|
||||
@@ -40,6 +39,7 @@ require (
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
|
||||
@@ -8,11 +8,11 @@ import (
|
||||
"scroll-tech/common/database"
|
||||
)
|
||||
|
||||
// LayerConfig is the configuration of Layer1/Layer2
|
||||
type LayerConfig struct {
|
||||
// FetcherConfig is the configuration of Layer1 or Layer2 fetcher.
|
||||
type FetcherConfig struct {
|
||||
Confirmation uint64 `json:"confirmation"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
StartHeight uint64 `json:"startHeight"` // Can only be configured to contract deployment height, otherwise in the current implementation, the message proof could not be successfully updated.
|
||||
StartHeight uint64 `json:"startHeight"` // Can only be configured to contract deployment height, message proof should be updated from the very beginning.
|
||||
BlockTime int64 `json:"blockTime"`
|
||||
FetchLimit uint64 `json:"fetchLimit"`
|
||||
MessengerAddr string `json:"MessengerAddr"`
|
||||
@@ -28,6 +28,7 @@ type LayerConfig struct {
|
||||
ScrollChainAddr string `json:"ScrollChainAddr"`
|
||||
GatewayRouterAddr string `json:"GatewayRouterAddr"`
|
||||
MessageQueueAddr string `json:"MessageQueueAddr"`
|
||||
BypassReorgDetection bool `json:"bypassReorgDetection"`
|
||||
}
|
||||
|
||||
// RedisConfig redis config
|
||||
@@ -43,8 +44,8 @@ type RedisConfig struct {
|
||||
|
||||
// Config is the configuration of the bridge history backend
|
||||
type Config struct {
|
||||
L1 *LayerConfig `json:"L1"`
|
||||
L2 *LayerConfig `json:"L2"`
|
||||
L1 *FetcherConfig `json:"L1"`
|
||||
L2 *FetcherConfig `json:"L2"`
|
||||
DB *database.Config `json:"db"`
|
||||
Redis *RedisConfig `json:"redis"`
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
// L1MessageFetcher fetches cross message events from L1 and saves them to database.
|
||||
type L1MessageFetcher struct {
|
||||
ctx context.Context
|
||||
cfg *config.LayerConfig
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
|
||||
l1SyncHeight uint64
|
||||
@@ -35,7 +35,7 @@ type L1MessageFetcher struct {
|
||||
}
|
||||
|
||||
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
|
||||
func NewL1MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
|
||||
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
|
||||
c := &L1MessageFetcher{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
@@ -108,7 +108,6 @@ func (c *L1MessageFetcher) Start() {
|
||||
}
|
||||
|
||||
func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
c.l1MessageFetcherRunningTotal.Inc()
|
||||
startHeight := c.l1SyncHeight + 1
|
||||
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
|
||||
if rpcErr != nil {
|
||||
@@ -134,6 +133,7 @@ func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
c.l1MessageFetcherReorgTotal.Inc()
|
||||
log.Warn("L1 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
|
||||
c.updateL1SyncHeight(resyncHeight, lastBlockHash)
|
||||
c.l1MessageFetcherRunningTotal.Inc()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -143,6 +143,7 @@ func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
}
|
||||
|
||||
c.updateL1SyncHeight(to, lastBlockHash)
|
||||
c.l1MessageFetcherRunningTotal.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
// L2MessageFetcher fetches cross message events from L2 and saves them to database.
|
||||
type L2MessageFetcher struct {
|
||||
ctx context.Context
|
||||
cfg *config.LayerConfig
|
||||
cfg *config.FetcherConfig
|
||||
db *gorm.DB
|
||||
client *ethclient.Client
|
||||
l2SyncHeight uint64
|
||||
@@ -35,7 +35,7 @@ type L2MessageFetcher struct {
|
||||
}
|
||||
|
||||
// NewL2MessageFetcher creates a new L2MessageFetcher instance.
|
||||
func NewL2MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
|
||||
func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
|
||||
c := &L2MessageFetcher{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
@@ -110,7 +110,6 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
return
|
||||
}
|
||||
log.Info("fetch and save missing L2 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
|
||||
c.l2MessageFetcherRunningTotal.Inc()
|
||||
|
||||
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
|
||||
to := from + c.cfg.FetchLimit - 1
|
||||
@@ -128,6 +127,7 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
c.l2MessageFetcherReorgTotal.Inc()
|
||||
log.Warn("L2 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
|
||||
c.updateL2SyncHeight(resyncHeight, lastBlockHash)
|
||||
c.l2MessageFetcherRunningTotal.Inc()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -142,6 +142,7 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
|
||||
}
|
||||
|
||||
c.updateL2SyncHeight(to, lastBlockHash)
|
||||
c.l2MessageFetcherRunningTotal.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -76,39 +76,30 @@ func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (ui
|
||||
|
||||
// L1InsertOrUpdate inserts or updates l1 messages
|
||||
func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult *L1FilterResult) error {
|
||||
err := b.db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 deposit messages", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages, tx); txErr != nil {
|
||||
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents, tx); txErr != nil {
|
||||
log.Error("failed to insert or update batch events", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 message queue events", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
|
||||
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l1FetcherResult.RevertedTxs, tx); txErr != nil {
|
||||
log.Error("failed to insert L1 failed gateway router transactions", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Error("failed to update db of L1 events", "err", err)
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages); err != nil {
|
||||
log.Error("failed to insert L1 deposit messages", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages); err != nil {
|
||||
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents); err != nil {
|
||||
log.Error("failed to insert or update batch events", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents); err != nil {
|
||||
log.Error("failed to insert L1 message queue events", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.crossMessageOrm.InsertFailedL1GatewayTxs(ctx, l1FetcherResult.RevertedTxs); err != nil {
|
||||
log.Error("failed to insert failed L1 gateway transactions", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -186,24 +177,18 @@ func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, heig
|
||||
|
||||
// L2InsertOrUpdate inserts or updates L2 messages
|
||||
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
|
||||
err := b.db.Transaction(func(tx *gorm.DB) error {
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages, tx); txErr != nil {
|
||||
log.Error("failed to insert L2 withdrawal messages", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
if txErr := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages, tx); txErr != nil {
|
||||
log.Error("failed to update L2 relayed messages of L1 deposits", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l2FetcherResult.OtherRevertedTxs, tx); txErr != nil {
|
||||
log.Error("failed to insert L2 failed gateway router transactions", "err", txErr)
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages); err != nil {
|
||||
log.Error("failed to insert L2 withdrawal messages", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Error("failed to update db of L2 events", "err", err)
|
||||
if err := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages); err != nil {
|
||||
log.Error("failed to update L2 relayed messages of L1 deposits", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.crossMessageOrm.InsertFailedL2GatewayTxs(ctx, l2FetcherResult.OtherRevertedTxs); err != nil {
|
||||
log.Error("failed to insert failed L2 gateway transactions", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -2,6 +2,7 @@ package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
@@ -10,21 +11,27 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L1EventParser the l1 event parser
|
||||
type L1EventParser struct {
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
}
|
||||
|
||||
// NewL1EventParser creates l1 event parser
|
||||
func NewL1EventParser() *L1EventParser {
|
||||
return &L1EventParser{}
|
||||
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
|
||||
return &L1EventParser{
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
|
||||
func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l1DepositMessages []*orm.CrossMessage
|
||||
var l1RelayedMessages []*orm.CrossMessage
|
||||
for _, vlog := range logs {
|
||||
@@ -32,7 +39,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1DepositETHSig:
|
||||
event := backendabi.ETHMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ETHGatewayABI, &event, "DepositETH", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositETH event", "err", err)
|
||||
log.Error("Failed to unpack DepositETH event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -44,7 +51,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL1ERC20GatewayABI, &event, "DepositERC20", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack DepositERC20 event", "err", err)
|
||||
log.Error("Failed to unpack DepositERC20 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -57,7 +64,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1DepositERC721Sig:
|
||||
event := backendabi.ERC721MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "DepositERC721", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositERC721 event", "err", err)
|
||||
log.Error("Failed to unpack DepositERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -70,7 +77,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1BatchDepositERC721Sig:
|
||||
event := backendabi.BatchERC721MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "BatchDepositERC721", vlog); err != nil {
|
||||
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
|
||||
log.Error("Failed to unpack BatchDepositERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -83,7 +90,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1DepositERC1155Sig:
|
||||
event := backendabi.ERC1155MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "DepositERC1155", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
|
||||
log.Error("Failed to unpack DepositERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -97,7 +104,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1BatchDepositERC1155Sig:
|
||||
event := backendabi.BatchERC1155MessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog); err != nil {
|
||||
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
|
||||
log.Error("Failed to unpack BatchDepositERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
|
||||
@@ -111,12 +118,17 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1SentMessageEventSig:
|
||||
event := backendabi.L1SentMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack SentMessage event", "err", err)
|
||||
log.Error("Failed to unpack SentMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
from, err := getRealFromAddress(ctx, event.Sender, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
|
||||
if err != nil {
|
||||
log.Error("Failed to get real 'from' address", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1DepositMessages = append(l1DepositMessages, &orm.CrossMessage{
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
Sender: event.Sender.String(),
|
||||
Sender: from,
|
||||
Receiver: event.Target.String(),
|
||||
TokenType: int(orm.TokenTypeETH),
|
||||
L1TxHash: vlog.TxHash.String(),
|
||||
@@ -130,7 +142,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1RelayedMessageEventSig:
|
||||
event := backendabi.L1RelayedMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "RelayedMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack RelayedMessage event", "err", err)
|
||||
log.Error("Failed to unpack RelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
|
||||
@@ -143,7 +155,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimest
|
||||
case backendabi.L1FailedRelayedMessageEventSig:
|
||||
event := backendabi.L1FailedRelayedMessageEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "FailedRelayedMessage", vlog); err != nil {
|
||||
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
|
||||
@@ -166,17 +178,17 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
case backendabi.L1CommitBatchEventSig:
|
||||
event := backendabi.L1CommitBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "CommitBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack CommitBatch event", "err", err)
|
||||
log.Error("Failed to unpack CommitBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
|
||||
if err != nil || isPending {
|
||||
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
|
||||
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
|
||||
return nil, err
|
||||
}
|
||||
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
|
||||
if err != nil {
|
||||
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
|
||||
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
@@ -190,7 +202,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
case backendabi.L1RevertBatchEventSig:
|
||||
event := backendabi.L1RevertBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack RevertBatch event", "err", err)
|
||||
log.Error("Failed to unpack RevertBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
@@ -202,7 +214,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
case backendabi.L1FinalizeBatchEventSig:
|
||||
event := backendabi.L1FinalizeBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
|
||||
log.Warn("Failed to unpack FinalizeBatch event", "err", err)
|
||||
log.Error("Failed to unpack FinalizeBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
@@ -229,7 +241,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
case backendabi.L1QueueTransactionEventSig:
|
||||
event := backendabi.L1QueueTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "QueueTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack QueueTransaction event", "err", err)
|
||||
log.Error("Failed to unpack QueueTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
messageHash := common.BytesToHash(crypto.Keccak256(event.Data))
|
||||
@@ -245,7 +257,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
case backendabi.L1DequeueTransactionEventSig:
|
||||
event := backendabi.L1DequeueTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DequeueTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DequeueTransaction event", "err", err)
|
||||
log.Error("Failed to unpack DequeueTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
|
||||
@@ -258,7 +270,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
case backendabi.L1DropTransactionEventSig:
|
||||
event := backendabi.L1DropTransactionEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
|
||||
log.Warn("Failed to unpack DropTransaction event", "err", err)
|
||||
log.Error("Failed to unpack DropTransaction event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
|
||||
@@ -270,3 +282,27 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
|
||||
}
|
||||
return l1MessageQueueEvents, nil
|
||||
}
|
||||
|
||||
func getRealFromAddress(ctx context.Context, eventSender common.Address, client *ethclient.Client, txHash common.Hash, gatewayRouterAddr string) (string, error) {
|
||||
from := eventSender.String()
|
||||
if from == gatewayRouterAddr {
|
||||
tx, isPending, rpcErr := client.TransactionByHash(ctx, txHash)
|
||||
if rpcErr != nil || isPending {
|
||||
log.Error("Failed to get transaction or the transaction is still pending", "rpcErr", rpcErr, "isPending", isPending)
|
||||
return "", rpcErr
|
||||
}
|
||||
// Case 1: deposit/withdraw ETH: EOA -> multisig -> gateway router -> messenger.
|
||||
if tx.To() != nil && (*tx.To()).String() != gatewayRouterAddr {
|
||||
return (*tx.To()).String(), nil
|
||||
}
|
||||
// Case 2: deposit/withdraw ETH: EOA -> gateway router -> messenger.
|
||||
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
|
||||
sender, err := signer.Sender(tx)
|
||||
if err != nil {
|
||||
log.Error("Get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", err)
|
||||
return "", err
|
||||
}
|
||||
return sender.String(), nil
|
||||
}
|
||||
return from, nil
|
||||
}
|
||||
|
||||
@@ -34,9 +34,10 @@ type L1FilterResult struct {
|
||||
|
||||
// L1FetcherLogic the L1 fetcher logic
|
||||
type L1FetcherLogic struct {
|
||||
cfg *config.LayerConfig
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
addressList []common.Address
|
||||
gatewayList []common.Address
|
||||
parser *L1EventParser
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
@@ -46,7 +47,7 @@ type L1FetcherLogic struct {
|
||||
}
|
||||
|
||||
// NewL1FetcherLogic creates L1 fetcher logic
|
||||
func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
|
||||
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
|
||||
addressList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
@@ -65,16 +66,40 @@ func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
common.HexToAddress(cfg.MessageQueueAddr),
|
||||
}
|
||||
|
||||
gatewayList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.StandardERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.CustomERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.WETHGatewayAddr),
|
||||
common.HexToAddress(cfg.DAIGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.ERC721GatewayAddr),
|
||||
common.HexToAddress(cfg.ERC1155GatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.MessengerAddr),
|
||||
|
||||
common.HexToAddress(cfg.GatewayRouterAddr),
|
||||
}
|
||||
|
||||
// Optional erc20 gateways.
|
||||
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
|
||||
}
|
||||
|
||||
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList)
|
||||
// The walkaround is used when SDK mismatches the upstream.
|
||||
if cfg.BypassReorgDetection {
|
||||
log.Warn("bypass reorg detction in L1, setting confirmation as L1ReorgSafeDepth (64)")
|
||||
cfg.Confirmation = L1ReorgSafeDepth
|
||||
}
|
||||
|
||||
log.Info("NewL2FetcherLogic", "bypassReorgDetection", cfg.BypassReorgDetection, "confirmation", cfg.Confirmation, "addresses", addressList, "gateways", gatewayList)
|
||||
|
||||
f := &L1FetcherLogic{
|
||||
db: db,
|
||||
@@ -83,7 +108,8 @@ func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
addressList: addressList,
|
||||
parser: NewL1EventParser(),
|
||||
gatewayList: gatewayList,
|
||||
parser: NewL1EventParser(cfg, client),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
@@ -131,15 +157,9 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
blockTimestampsMap[block.NumberU64()] = block.Time()
|
||||
|
||||
for _, tx := range block.Transactions() {
|
||||
txTo := tx.To()
|
||||
if txTo == nil {
|
||||
continue
|
||||
}
|
||||
toAddress := txTo.String()
|
||||
|
||||
// GatewayRouter: L1 deposit.
|
||||
// Gateways: L1 deposit.
|
||||
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
|
||||
if toAddress != f.cfg.GatewayRouterAddr && toAddress != f.cfg.MessengerAddr {
|
||||
if !isTransactionToGateway(tx, f.gatewayList) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -233,7 +253,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(eventLogs, blockTimestampsMap)
|
||||
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
|
||||
@@ -1,26 +1,35 @@
|
||||
package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
"scroll-tech/bridge-history-api/internal/orm"
|
||||
"scroll-tech/bridge-history-api/internal/utils"
|
||||
)
|
||||
|
||||
// L2EventParser the L2 event parser
|
||||
type L2EventParser struct {
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
}
|
||||
|
||||
// NewL2EventParser creates the L2 event parser
|
||||
func NewL2EventParser() *L2EventParser {
|
||||
return &L2EventParser{}
|
||||
func NewL2EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L2EventParser {
|
||||
return &L2EventParser{
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseL2EventLogs parses L2 watched events
|
||||
func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
|
||||
var l2WithdrawMessages []*orm.CrossMessage
|
||||
var l2RelayedMessages []*orm.CrossMessage
|
||||
for _, vlog := range logs {
|
||||
@@ -29,7 +38,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.ETHMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ETHGatewayABI, &event, "WithdrawETH", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawETH event", "err", err)
|
||||
log.Error("Failed to unpack WithdrawETH event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -41,7 +50,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.ERC20MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC20GatewayABI, &event, "WithdrawERC20", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
|
||||
log.Error("Failed to unpack WithdrawERC20 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -55,7 +64,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.ERC721MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
|
||||
log.Error("Failed to unpack WithdrawERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -69,7 +78,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.BatchERC721MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
|
||||
log.Error("Failed to unpack BatchWithdrawERC721 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -83,7 +92,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.ERC1155MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
|
||||
log.Error("Failed to unpack WithdrawERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -98,7 +107,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.BatchERC1155MessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
|
||||
log.Error("Failed to unpack BatchWithdrawERC1155 event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
|
||||
@@ -113,12 +122,17 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.L2SentMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "SentMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack SentMessage event", "err", err)
|
||||
log.Error("Failed to unpack SentMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
from, err := getRealFromAddress(ctx, event.Sender, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
|
||||
if err != nil {
|
||||
log.Error("Failed to get real 'from' address", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2WithdrawMessages = append(l2WithdrawMessages, &orm.CrossMessage{
|
||||
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
|
||||
Sender: event.Sender.String(),
|
||||
Sender: from,
|
||||
Receiver: event.Target.String(),
|
||||
TokenType: int(orm.TokenTypeETH),
|
||||
L2TxHash: vlog.TxHash.String(),
|
||||
@@ -137,7 +151,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.L2RelayedMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "RelayedMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack RelayedMessage event", "err", err)
|
||||
log.Error("Failed to unpack RelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
|
||||
@@ -151,7 +165,7 @@ func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap ma
|
||||
event := backendabi.L2RelayedMessageEvent{}
|
||||
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "FailedRelayedMessage", vlog)
|
||||
if err != nil {
|
||||
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
|
||||
|
||||
@@ -33,9 +33,10 @@ type L2FilterResult struct {
|
||||
|
||||
// L2FetcherLogic the L2 fetcher logic
|
||||
type L2FetcherLogic struct {
|
||||
cfg *config.LayerConfig
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
addressList []common.Address
|
||||
gatewayList []common.Address
|
||||
parser *L2EventParser
|
||||
db *gorm.DB
|
||||
crossMessageOrm *orm.CrossMessage
|
||||
@@ -45,7 +46,7 @@ type L2FetcherLogic struct {
|
||||
}
|
||||
|
||||
// NewL2FetcherLogic create L2 fetcher logic
|
||||
func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
|
||||
func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
|
||||
addressList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
@@ -60,16 +61,38 @@ func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
common.HexToAddress(cfg.MessengerAddr),
|
||||
}
|
||||
|
||||
gatewayList := []common.Address{
|
||||
common.HexToAddress(cfg.ETHGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.StandardERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.CustomERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.WETHGatewayAddr),
|
||||
common.HexToAddress(cfg.DAIGatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.ERC721GatewayAddr),
|
||||
common.HexToAddress(cfg.ERC1155GatewayAddr),
|
||||
|
||||
common.HexToAddress(cfg.MessengerAddr),
|
||||
|
||||
common.HexToAddress(cfg.GatewayRouterAddr),
|
||||
}
|
||||
|
||||
// Optional erc20 gateways.
|
||||
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
|
||||
}
|
||||
|
||||
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList)
|
||||
if cfg.BypassReorgDetection {
|
||||
log.Crit("Never bypass reorg detection in L2")
|
||||
}
|
||||
|
||||
log.Info("NewL2FetcherLogic", "bypassReorgDetection", cfg.BypassReorgDetection, "confirmation", cfg.Confirmation, "addresses", addressList, "gateways", gatewayList)
|
||||
|
||||
f := &L2FetcherLogic{
|
||||
db: db,
|
||||
@@ -78,7 +101,8 @@ func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.C
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
addressList: addressList,
|
||||
parser: NewL2EventParser(),
|
||||
gatewayList: gatewayList,
|
||||
parser: NewL2EventParser(cfg, client),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
@@ -127,42 +151,7 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
blockTimestampsMap[block.NumberU64()] = block.Time()
|
||||
|
||||
for _, tx := range block.Transactions() {
|
||||
txTo := tx.To()
|
||||
if txTo == nil {
|
||||
continue
|
||||
}
|
||||
toAddress := txTo.String()
|
||||
|
||||
// GatewayRouter: L2 withdrawal.
|
||||
if toAddress == f.cfg.GatewayRouterAddr {
|
||||
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
|
||||
if receiptErr != nil {
|
||||
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
|
||||
return nil, nil, nil, receiptErr
|
||||
}
|
||||
|
||||
// Check if the transaction is failed
|
||||
if receipt.Status == types.ReceiptStatusFailed {
|
||||
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
|
||||
sender, signerErr := signer.Sender(tx)
|
||||
if signerErr != nil {
|
||||
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
|
||||
return nil, nil, nil, signerErr
|
||||
}
|
||||
|
||||
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
|
||||
L2TxHash: tx.Hash().String(),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
Sender: sender.String(),
|
||||
Receiver: (*tx.To()).String(),
|
||||
L2BlockNumber: receipt.BlockNumber.Uint64(),
|
||||
BlockTimestamp: block.Time(),
|
||||
TxStatus: int(orm.TxStatusTypeSentTxReverted),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if tx.Type() == types.L1MessageTxType {
|
||||
if tx.IsL1MessageTx() {
|
||||
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
|
||||
if receiptErr != nil {
|
||||
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
|
||||
@@ -179,6 +168,38 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
MessageType: int(orm.MessageTypeL1SentMessage),
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Gateways: L2 withdrawal.
|
||||
if !isTransactionToGateway(tx, f.gatewayList) {
|
||||
continue
|
||||
}
|
||||
|
||||
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
|
||||
if receiptErr != nil {
|
||||
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
|
||||
return nil, nil, nil, receiptErr
|
||||
}
|
||||
|
||||
// Check if the transaction is failed
|
||||
if receipt.Status == types.ReceiptStatusFailed {
|
||||
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
|
||||
sender, signerErr := signer.Sender(tx)
|
||||
if signerErr != nil {
|
||||
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
|
||||
return nil, nil, nil, signerErr
|
||||
}
|
||||
|
||||
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
|
||||
L2TxHash: tx.Hash().String(),
|
||||
MessageType: int(orm.MessageTypeL2SentMessage),
|
||||
Sender: sender.String(),
|
||||
Receiver: (*tx.To()).String(),
|
||||
L2BlockNumber: receipt.BlockNumber.Uint64(),
|
||||
BlockTimestamp: block.Time(),
|
||||
TxStatus: int(orm.TxStatusTypeSentTxReverted),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -235,7 +256,7 @@ func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(eventLogs, blockTimestampsMap)
|
||||
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
@@ -279,3 +300,15 @@ func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isTransactionToGateway(tx *types.Transaction, gatewayList []common.Address) bool {
|
||||
if tx.To() == nil {
|
||||
return false
|
||||
}
|
||||
for _, gateway := range gatewayList {
|
||||
if *tx.To() == gateway {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// BatchStatusType represents the type of batch status.
|
||||
@@ -89,19 +90,21 @@ func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, block
|
||||
}
|
||||
|
||||
// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
|
||||
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent, dbTX ...*gorm.DB) error {
|
||||
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent) error {
|
||||
for _, l1BatchEvent := range l1BatchEvents {
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&BatchEvent{})
|
||||
updateFields := make(map[string]interface{})
|
||||
switch BatchStatusType(l1BatchEvent.BatchStatus) {
|
||||
case BatchStatusTypeCommitted:
|
||||
// Use the clause to either insert or ignore on conflict
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "batch_hash"}},
|
||||
DoNothing: true,
|
||||
})
|
||||
if err := db.Create(l1BatchEvent).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert batch event, error: %w", err)
|
||||
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
|
||||
}
|
||||
case BatchStatusTypeFinalized:
|
||||
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
@@ -47,9 +46,9 @@ const (
|
||||
TxStatusTypeSent TxStatusType = iota
|
||||
TxStatusTypeSentTxReverted // Not track message hash, thus will not be processed again anymore.
|
||||
TxStatusTypeRelayed // Terminal status.
|
||||
// FailedRelayedMessage event: encoded tx failed, cannot retry. e.g., https://sepolia.scrollscan.com/tx/0xfc7d3ea5ec8dc9b664a5a886c3b33d21e665355057601033481a439498efb79a
|
||||
TxStatusTypeFailedRelayed // Terminal status.
|
||||
// In some cases, user can retry with a larger gas limit. e.g., https://sepolia.scrollscan.com/tx/0x7323a7ba29492cb47d92206411be99b27896f2823cee0633a596b646b73f1b5b
|
||||
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
|
||||
TxStatusTypeFailedRelayed
|
||||
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
|
||||
TxStatusTypeRelayTxReverted
|
||||
TxStatusTypeSkipped
|
||||
TxStatusTypeDropped // Terminal status.
|
||||
@@ -254,38 +253,27 @@ func (c *CrossMessage) GetTxsByAddress(ctx context.Context, sender string) ([]*C
|
||||
}
|
||||
|
||||
// UpdateL1MessageQueueEventsInfo updates the information about L1 message queue events in the database.
|
||||
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent) error {
|
||||
// update tx statuses.
|
||||
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeFailedRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
txStatusUpdateFields := make(map[string]interface{})
|
||||
switch l1MessageQueueEvent.EventType {
|
||||
case MessageQueueEventTypeQueueTransaction:
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sentMessages have been filtered out.
|
||||
// replayMessage case:
|
||||
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
|
||||
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
|
||||
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
|
||||
//
|
||||
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
|
||||
// because in replayMessage, queue index != message nonce.
|
||||
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
|
||||
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeSent // reset status to "sent".
|
||||
continue
|
||||
case MessageQueueEventTypeDequeueTransaction:
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
|
||||
case MessageQueueEventTypeDropTransaction:
|
||||
// do not over-write terminal statuses.
|
||||
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
|
||||
db = db.Where("tx_status != ?", TxStatusTypeDropped)
|
||||
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
|
||||
@@ -298,15 +286,22 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
|
||||
// update tx hashes of replay and refund.
|
||||
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
txHashUpdateFields := make(map[string]interface{})
|
||||
switch l1MessageQueueEvent.EventType {
|
||||
case MessageQueueEventTypeDequeueTransaction:
|
||||
continue
|
||||
case MessageQueueEventTypeQueueTransaction:
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sentMessages have been filtered out.
|
||||
// only replayMessages or enforced txs (whose message hashes would not be found), sendMessages have been filtered out.
|
||||
// replayMessage case:
|
||||
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
|
||||
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
|
||||
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
|
||||
//
|
||||
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
|
||||
// because in replayMessage, queue index != message nonce.
|
||||
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
|
||||
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
|
||||
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
|
||||
case MessageQueueEventTypeDropTransaction:
|
||||
@@ -314,11 +309,8 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
|
||||
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
|
||||
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
|
||||
}
|
||||
// Check if there are fields to update to avoid empty update operation (skip message).
|
||||
if len(txHashUpdateFields) > 0 {
|
||||
if err := db.Updates(txHashUpdateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
|
||||
}
|
||||
if err := db.Updates(txHashUpdateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -362,14 +354,11 @@ func (c *CrossMessage) UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx c
|
||||
}
|
||||
|
||||
// InsertOrUpdateL1Messages inserts or updates a list of L1 cross messages into the database.
|
||||
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
|
||||
@@ -384,18 +373,14 @@ func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []
|
||||
}
|
||||
|
||||
// InsertOrUpdateL2Messages inserts or updates a list of L2 cross messages into the database.
|
||||
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
|
||||
// The merkle_proof is updated separately in batch status updates and hence is not included here.
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l2_block_number", "l2_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_from", "message_to", "message_value", "message_data", "message_nonce"}),
|
||||
@@ -406,31 +391,60 @@ func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertFailedGatewayRouterTxs inserts a list of transactions that failed to interact with the gateway router into the database.
|
||||
// These failed transactions are only fetched once, so they are inserted without checking for duplicates.
|
||||
// To resolve unique index confliction, a random UUID will be generated and used as the MessageHash.
|
||||
func (c *CrossMessage) InsertFailedGatewayRouterTxs(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
// InsertFailedL2GatewayTxs inserts a list of transactions that failed to interact with the L2 gateways into the database.
|
||||
// To resolve unique index confliction, L2 tx hash is used as the MessageHash.
|
||||
// The OnConflict clause is used to prevent inserting same failed transactions multiple times.
|
||||
func (c *CrossMessage) InsertFailedL2GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
|
||||
for _, message := range messages {
|
||||
message.MessageHash = message.L2TxHash
|
||||
}
|
||||
|
||||
db := c.db
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
for _, message := range messages {
|
||||
message.MessageHash = uuid.New().String()
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoNothing: true,
|
||||
})
|
||||
|
||||
if err := db.Create(&messages).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
|
||||
}
|
||||
if err := db.Create(messages).Error; err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertFailedL1GatewayTxs inserts a list of transactions that failed to interact with the L1 gateways into the database.
|
||||
// To resolve unique index confliction, L1 tx hash is used as the MessageHash.
|
||||
// The OnConflict clause is used to prevent inserting same failed transactions multiple times.
|
||||
func (c *CrossMessage) InsertFailedL1GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
|
||||
if len(messages) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, message := range messages {
|
||||
message.MessageHash = message.L1TxHash
|
||||
}
|
||||
|
||||
db := c.db
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "message_hash"}},
|
||||
DoNothing: true,
|
||||
})
|
||||
|
||||
if err := db.Create(&messages).Error; err != nil {
|
||||
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertOrUpdateL2RelayedMessagesOfL1Deposits inserts or updates the database with a list of L2 relayed messages related to L1 deposits.
|
||||
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage) error {
|
||||
if len(l2RelayedMessages) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -459,7 +473,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
|
||||
for _, msg := range mergedL2RelayedMessages {
|
||||
uniqueL2RelayedMessages = append(uniqueL2RelayedMessages, msg)
|
||||
}
|
||||
// Do not update tx status of successfully or failed relayed messages,
|
||||
// Do not update tx status of successfully relayed messages,
|
||||
// because if a message is handled, the later relayed message tx would be reverted.
|
||||
// ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L2/L2ScrollMessenger.sol#L102
|
||||
// e.g.,
|
||||
@@ -476,7 +490,6 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
|
||||
clause.And(
|
||||
// do not over-write terminal statuses.
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
|
||||
),
|
||||
},
|
||||
@@ -489,7 +502,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
|
||||
}
|
||||
|
||||
// InsertOrUpdateL1RelayedMessagesOfL2Withdrawals inserts or updates the database with a list of L1 relayed messages related to L2 withdrawals.
|
||||
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
|
||||
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage) error {
|
||||
if len(l1RelayedMessages) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -519,9 +532,6 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
|
||||
uniqueL1RelayedMessages = append(uniqueL1RelayedMessages, msg)
|
||||
}
|
||||
db := c.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
@@ -532,7 +542,6 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
|
||||
clause.And(
|
||||
// do not over-write terminal statuses.
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
|
||||
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
|
||||
),
|
||||
},
|
||||
|
||||
@@ -15,6 +15,7 @@ CREATE TABLE batch_event_v2
|
||||
deleted_at TIMESTAMP(0) DEFAULT NULL
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS unique_idx_be_batch_hash ON batch_event_v2 (batch_hash);
|
||||
CREATE INDEX IF NOT EXISTS idx_be_l1_block_number ON batch_event_v2 (l1_block_number);
|
||||
CREATE INDEX IF NOT EXISTS idx_be_batch_index ON batch_event_v2 (batch_index);
|
||||
CREATE INDEX IF NOT EXISTS idx_be_batch_index_batch_hash ON batch_event_v2 (batch_index, batch_hash);
|
||||
|
||||
@@ -21,8 +21,8 @@ const (
|
||||
// GasOracleImported represents the gas oracle status is imported
|
||||
GasOracleImported
|
||||
|
||||
// GasOracleFailed represents the gas oracle status is failed
|
||||
GasOracleFailed
|
||||
// GasOracleImportedFailed represents the gas oracle status is imported failed
|
||||
GasOracleImportedFailed
|
||||
)
|
||||
|
||||
func (s GasOracleStatus) String() string {
|
||||
@@ -35,10 +35,10 @@ func (s GasOracleStatus) String() string {
|
||||
return "GasOracleImporting"
|
||||
case GasOracleImported:
|
||||
return "GasOracleImported"
|
||||
case GasOracleFailed:
|
||||
return "GasOracleFailed"
|
||||
case GasOracleImportedFailed:
|
||||
return "GasOracleImportedFailed"
|
||||
default:
|
||||
return fmt.Sprintf("Undefined (%d)", int32(s))
|
||||
return fmt.Sprintf("Undefined GasOracleStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -159,7 +159,7 @@ func (ps ProvingStatus) String() string {
|
||||
case ProvingTaskFailed:
|
||||
return "failed"
|
||||
default:
|
||||
return fmt.Sprintf("Undefined (%d)", int32(ps))
|
||||
return fmt.Sprintf("Undefined ProvingStatus (%d)", int32(ps))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -184,7 +184,7 @@ func (s ChunkProofsStatus) String() string {
|
||||
case ChunkProofsStatusReady:
|
||||
return "ChunkProofsStatusReady"
|
||||
default:
|
||||
return fmt.Sprintf("Undefined (%d)", int32(s))
|
||||
return fmt.Sprintf("Undefined ChunkProofsStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,6 +227,69 @@ func (s RollupStatus) String() string {
|
||||
case RollupFinalizeFailed:
|
||||
return "RollupFinalizeFailed"
|
||||
default:
|
||||
return fmt.Sprintf("Undefined (%d)", int32(s))
|
||||
return fmt.Sprintf("Undefined RollupStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
// SenderType defines the various types of senders sending the transactions.
|
||||
type SenderType int
|
||||
|
||||
const (
|
||||
// SenderTypeUnknown indicates an unknown sender type.
|
||||
SenderTypeUnknown SenderType = iota
|
||||
// SenderTypeCommitBatch indicates the sender is responsible for committing batches.
|
||||
SenderTypeCommitBatch
|
||||
// SenderTypeFinalizeBatch indicates the sender is responsible for finalizing batches.
|
||||
SenderTypeFinalizeBatch
|
||||
// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
|
||||
SenderTypeL1GasOracle
|
||||
// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
|
||||
SenderTypeL2GasOracle
|
||||
)
|
||||
|
||||
// String returns a string representation of the SenderType.
|
||||
func (t SenderType) String() string {
|
||||
switch t {
|
||||
case SenderTypeCommitBatch:
|
||||
return "SenderTypeCommitBatch"
|
||||
case SenderTypeFinalizeBatch:
|
||||
return "SenderTypeFinalizeBatch"
|
||||
case SenderTypeL1GasOracle:
|
||||
return "SenderTypeL1GasOracle"
|
||||
case SenderTypeL2GasOracle:
|
||||
return "SenderTypeL2GasOracle"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
|
||||
}
|
||||
}
|
||||
|
||||
// TxStatus represents the current status of a transaction in the transaction lifecycle.
|
||||
type TxStatus int
|
||||
|
||||
const (
|
||||
// TxStatusUnknown represents an undefined status of the transaction.
|
||||
TxStatusUnknown TxStatus = iota
|
||||
// TxStatusPending indicates that the transaction is yet to be processed.
|
||||
TxStatusPending
|
||||
// TxStatusReplaced indicates that the transaction has been replaced by another one, typically due to a higher gas price.
|
||||
TxStatusReplaced
|
||||
// TxStatusConfirmed indicates that the transaction has been successfully processed and confirmed.
|
||||
TxStatusConfirmed
|
||||
// TxStatusConfirmedFailed indicates that the transaction has failed during processing.
|
||||
TxStatusConfirmedFailed
|
||||
)
|
||||
|
||||
func (s TxStatus) String() string {
|
||||
switch s {
|
||||
case TxStatusPending:
|
||||
return "TxStatusPending"
|
||||
case TxStatusReplaced:
|
||||
return "TxStatusReplaced"
|
||||
case TxStatusConfirmed:
|
||||
return "TxStatusConfirmed"
|
||||
case TxStatusConfirmedFailed:
|
||||
return "TxStatusConfirmedFailed"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,7 +75,7 @@ func TestProvingStatus(t *testing.T) {
|
||||
{
|
||||
"Undefined",
|
||||
ProvingStatus(999), // Invalid value.
|
||||
"Undefined (999)",
|
||||
"Undefined ProvingStatus (999)",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -85,3 +85,243 @@ func TestProvingStatus(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollupStatus(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
s RollupStatus
|
||||
want string
|
||||
}{
|
||||
{
|
||||
"RollupUndefined",
|
||||
RollupUndefined,
|
||||
"Undefined RollupStatus (0)",
|
||||
},
|
||||
{
|
||||
"RollupPending",
|
||||
RollupPending,
|
||||
"RollupPending",
|
||||
},
|
||||
{
|
||||
"RollupCommitting",
|
||||
RollupCommitting,
|
||||
"RollupCommitting",
|
||||
},
|
||||
{
|
||||
"RollupCommitted",
|
||||
RollupCommitted,
|
||||
"RollupCommitted",
|
||||
},
|
||||
{
|
||||
"RollupFinalizing",
|
||||
RollupFinalizing,
|
||||
"RollupFinalizing",
|
||||
},
|
||||
{
|
||||
"RollupFinalized",
|
||||
RollupFinalized,
|
||||
"RollupFinalized",
|
||||
},
|
||||
{
|
||||
"RollupCommitFailed",
|
||||
RollupCommitFailed,
|
||||
"RollupCommitFailed",
|
||||
},
|
||||
{
|
||||
"RollupFinalizeFailed",
|
||||
RollupFinalizeFailed,
|
||||
"RollupFinalizeFailed",
|
||||
},
|
||||
{
|
||||
"Invalid Value",
|
||||
RollupStatus(999),
|
||||
"Undefined RollupStatus (999)",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, tt.s.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSenderType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
t SenderType
|
||||
want string
|
||||
}{
|
||||
{
|
||||
"SenderTypeUnknown",
|
||||
SenderTypeUnknown,
|
||||
"Unknown SenderType (0)",
|
||||
},
|
||||
{
|
||||
"SenderTypeCommitBatch",
|
||||
SenderTypeCommitBatch,
|
||||
"SenderTypeCommitBatch",
|
||||
},
|
||||
{
|
||||
"SenderTypeFinalizeBatch",
|
||||
SenderTypeFinalizeBatch,
|
||||
"SenderTypeFinalizeBatch",
|
||||
},
|
||||
{
|
||||
"SenderTypeL1GasOracle",
|
||||
SenderTypeL1GasOracle,
|
||||
"SenderTypeL1GasOracle",
|
||||
},
|
||||
{
|
||||
"SenderTypeL2GasOracle",
|
||||
SenderTypeL2GasOracle,
|
||||
"SenderTypeL2GasOracle",
|
||||
},
|
||||
{
|
||||
"Invalid Value",
|
||||
SenderType(999),
|
||||
"Unknown SenderType (999)",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, tt.t.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTxStatus(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
s TxStatus
|
||||
want string
|
||||
}{
|
||||
{
|
||||
"TxStatusUnknown",
|
||||
TxStatusUnknown,
|
||||
"Unknown TxStatus (0)",
|
||||
},
|
||||
{
|
||||
"TxStatusPending",
|
||||
TxStatusPending,
|
||||
"TxStatusPending",
|
||||
},
|
||||
{
|
||||
"TxStatusReplaced",
|
||||
TxStatusReplaced,
|
||||
"TxStatusReplaced",
|
||||
},
|
||||
{
|
||||
"TxStatusConfirmed",
|
||||
TxStatusConfirmed,
|
||||
"TxStatusConfirmed",
|
||||
},
|
||||
{
|
||||
"TxStatusConfirmedFailed",
|
||||
TxStatusConfirmedFailed,
|
||||
"TxStatusConfirmedFailed",
|
||||
},
|
||||
{
|
||||
"Invalid Value",
|
||||
TxStatus(999),
|
||||
"Unknown TxStatus (999)",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, tt.s.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGasOracleStatus(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
s GasOracleStatus
|
||||
want string
|
||||
}{
|
||||
{
|
||||
"GasOracleUndefined",
|
||||
GasOracleUndefined,
|
||||
"GasOracleUndefined",
|
||||
},
|
||||
{
|
||||
"GasOraclePending",
|
||||
GasOraclePending,
|
||||
"GasOraclePending",
|
||||
},
|
||||
{
|
||||
"GasOracleImporting",
|
||||
GasOracleImporting,
|
||||
"GasOracleImporting",
|
||||
},
|
||||
{
|
||||
"GasOracleImported",
|
||||
GasOracleImported,
|
||||
"GasOracleImported",
|
||||
},
|
||||
{
|
||||
"GasOracleImportedFailed",
|
||||
GasOracleImportedFailed,
|
||||
"GasOracleImportedFailed",
|
||||
},
|
||||
{
|
||||
"Invalid Value",
|
||||
GasOracleStatus(999),
|
||||
"Undefined GasOracleStatus (999)",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, tt.s.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProverTaskFailureType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
r ProverTaskFailureType
|
||||
want string
|
||||
}{
|
||||
{
|
||||
"ProverTaskFailureTypeUndefined",
|
||||
ProverTaskFailureTypeUndefined,
|
||||
"prover task failure undefined",
|
||||
},
|
||||
{
|
||||
"ProverTaskFailureTypeTimeout",
|
||||
ProverTaskFailureTypeTimeout,
|
||||
"prover task failure timeout",
|
||||
},
|
||||
{
|
||||
"ProverTaskFailureTypeSubmitStatusNotOk",
|
||||
ProverTaskFailureTypeSubmitStatusNotOk,
|
||||
"prover task failure validated submit proof status not ok",
|
||||
},
|
||||
{
|
||||
"ProverTaskFailureTypeVerifiedFailed",
|
||||
ProverTaskFailureTypeVerifiedFailed,
|
||||
"prover task failure verified failed",
|
||||
},
|
||||
{
|
||||
"ProverTaskFailureTypeServerError",
|
||||
ProverTaskFailureTypeServerError,
|
||||
"prover task failure server exception",
|
||||
},
|
||||
{
|
||||
"Invalid Value",
|
||||
ProverTaskFailureType(999),
|
||||
"illegal prover task failure type (999)",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, tt.r.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.3.53"
|
||||
var tag = "v4.3.59"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
10
contracts/.solcover.js
Normal file
10
contracts/.solcover.js
Normal file
@@ -0,0 +1,10 @@
|
||||
module.exports = {
|
||||
skipFiles: [
|
||||
'mocks',
|
||||
'test',
|
||||
'L2/predeploys/L1BlockContainer.sol',
|
||||
'libraries/verifier/ZkTrieVerifier.sol',
|
||||
'libraries/verifier/PatriciaMerkleTrieVerifier.sol'
|
||||
],
|
||||
istanbulReporter: ["lcov", "json"]
|
||||
};
|
||||
2
contracts/circomlib.d.ts
vendored
2
contracts/circomlib.d.ts
vendored
@@ -1 +1,3 @@
|
||||
declare module "circomlib/src/evmasm";
|
||||
declare module "circomlib/src/poseidon_gencontract";
|
||||
declare module "circomlib/src/poseidon_constants";
|
||||
|
||||
@@ -20,7 +20,6 @@ sender = '0x00a329c0648769a73afac7f9381e08fb43dbea72' # the address of `
|
||||
tx_origin = '0x00a329c0648769a73afac7f9381e08fb43dbea72' # the address of `tx.origin` in tests
|
||||
initial_balance = '0xffffffffffffffffffffffff' # the initial balance of the test contract
|
||||
block_number = 0 # the block number we are at in tests
|
||||
chain_id = 99 # the chain id we are on in tests
|
||||
gas_limit = 9223372036854775807 # the gas limit in tests
|
||||
gas_price = 0 # the gas price (in wei) in tests
|
||||
block_base_fee_per_gas = 0 # the base fee (in wei) in tests
|
||||
|
||||
@@ -334,6 +334,46 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
"L1GatewayRouter.depositERC20 USDC after upgrade"
|
||||
);
|
||||
});
|
||||
|
||||
it.skip("should succeed on L1LidoGateway", async () => {
|
||||
const L1_WSTETH = "0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0";
|
||||
const L2_WSTETH = "0xf610A9dfB7C89644979b4A0f27063E9e7d7Cda32";
|
||||
const L1_GATEWAY = "0x6625C6332c9F91F2D27c304E729B86db87A3f504";
|
||||
const L2_GATEWAY = "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9";
|
||||
const L1LidoGateway = await ethers.getContractFactory("L1LidoGateway", deployer);
|
||||
const impl = await L1LidoGateway.deploy(L1_WSTETH, L2_WSTETH, L2_GATEWAY, L1_ROUTER, L1_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L1LidoGateway", L1_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseUnits("1", 6);
|
||||
const fee = await queue.estimateCrossDomainMessageFee(1e6);
|
||||
const token = await ethers.getContractAt("MockERC20", L1_WSTETH, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 0);
|
||||
await token.approve(L1_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L1_ROUTER, constants.MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
await gateway["depositERC20(address,uint256,uint256)"](L1_WSTETH, amountIn, 1e6, { value: fee }),
|
||||
"L1LidoGateway.depositERC20 wstETH before upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["depositERC20(address,uint256,uint256)"](L1_WSTETH, amountIn, 1e6, { value: fee }),
|
||||
"L1GatewayRouter.depositERC20 wstETH before upgrade"
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL1(L1_GATEWAY, impl.address);
|
||||
await gateway.initializeV2(deployer.address, deployer.address, deployer.address, deployer.address);
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
await gateway["depositERC20(address,uint256,uint256)"](L1_WSTETH, amountIn, 1e6, { value: fee }),
|
||||
"L1LidoGateway.depositERC20 wstETH after upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["depositERC20(address,uint256,uint256)"](L1_WSTETH, amountIn, 1e6, { value: fee }),
|
||||
"L1GatewayRouter.depositERC20 wstETH after upgrade"
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
context("L2 upgrade", async () => {
|
||||
@@ -584,5 +624,44 @@ describe("GasOptimizationUpgrade.spec", async () => {
|
||||
"L2GatewayRouter.withdrawERC20 USDC after upgrade"
|
||||
);
|
||||
});
|
||||
|
||||
it.skip("should succeed on L2LidoGateway", async () => {
|
||||
const L1_WSTETH = "0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0";
|
||||
const L2_WSTETH = "0xf610A9dfB7C89644979b4A0f27063E9e7d7Cda32";
|
||||
const L1_GATEWAY = "0x6625C6332c9F91F2D27c304E729B86db87A3f504";
|
||||
const L2_GATEWAY = "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9";
|
||||
const L2LidoGateway = await ethers.getContractFactory("L2LidoGateway", deployer);
|
||||
const impl = await L2LidoGateway.deploy(L1_WSTETH, L2_WSTETH, L1_GATEWAY, L2_ROUTER, L2_MESSENGER);
|
||||
const gateway = await ethers.getContractAt("L2LidoGateway", L2_GATEWAY, deployer);
|
||||
const amountIn = ethers.utils.parseUnits("1", 6);
|
||||
const token = await ethers.getContractAt("MockERC20", L2_WSTETH, deployer);
|
||||
await mockERC20Balance(token.address, amountIn.mul(10), 51);
|
||||
await token.approve(L2_GATEWAY, constants.MaxUint256);
|
||||
await token.approve(L2_ROUTER, constants.MaxUint256);
|
||||
|
||||
// before upgrade
|
||||
await showGasUsage(
|
||||
await gateway["withdrawERC20(address,uint256,uint256)"](L2_WSTETH, amountIn, 1e6),
|
||||
"L2LidoGateway.withdrawERC20 wstETH before upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["withdrawERC20(address,uint256,uint256)"](L2_WSTETH, amountIn, 1e6),
|
||||
"L2GatewayRouter.withdrawERC20 wstETH before upgrade"
|
||||
);
|
||||
|
||||
// do upgrade
|
||||
await upgradeL2(L2_GATEWAY, impl.address);
|
||||
await gateway.initializeV2(deployer.address, deployer.address, deployer.address, deployer.address);
|
||||
|
||||
// after upgrade
|
||||
await showGasUsage(
|
||||
await gateway["withdrawERC20(address,uint256,uint256)"](L2_WSTETH, amountIn, 1e6),
|
||||
"L2LidoGateway.withdrawERC20 wstETH after upgrade"
|
||||
);
|
||||
await showGasUsage(
|
||||
await router["withdrawERC20(address,uint256,uint256)"](L2_WSTETH, amountIn, 1e6),
|
||||
"L2GatewayRouter.withdrawERC20 wstETH after upgrade"
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
97
contracts/integration-test/PoseidonHash.spec.ts
Normal file
97
contracts/integration-test/PoseidonHash.spec.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
/* eslint-disable node/no-missing-import */
|
||||
/* eslint-disable node/no-unpublished-import */
|
||||
import { expect } from "chai";
|
||||
import { randomBytes } from "crypto";
|
||||
import { BigNumber, Contract } from "ethers";
|
||||
import { ethers } from "hardhat";
|
||||
import fs from "fs";
|
||||
|
||||
import PoseidonWithoutDomain from "circomlib/src/poseidon_gencontract";
|
||||
import { generateABI, createCode } from "../scripts/poseidon";
|
||||
|
||||
describe("PoseidonHash.spec", async () => {
|
||||
// test against with circomlib's implementation.
|
||||
context("domain = zero", async () => {
|
||||
let poseidonCircom: Contract;
|
||||
let poseidon: Contract;
|
||||
|
||||
beforeEach(async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
|
||||
const PoseidonWithoutDomainFactory = new ethers.ContractFactory(
|
||||
PoseidonWithoutDomain.generateABI(2),
|
||||
PoseidonWithoutDomain.createCode(2),
|
||||
deployer
|
||||
);
|
||||
poseidonCircom = await PoseidonWithoutDomainFactory.deploy();
|
||||
await poseidonCircom.deployed();
|
||||
|
||||
const PoseidonWithDomainFactory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
|
||||
poseidon = await PoseidonWithDomainFactory.deploy();
|
||||
await poseidon.deployed();
|
||||
});
|
||||
|
||||
it("should succeed on zero inputs", async () => {
|
||||
expect(await poseidonCircom["poseidon(uint256[2])"]([0, 0])).to.eq(
|
||||
await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 0)
|
||||
);
|
||||
});
|
||||
|
||||
it("should succeed on random inputs", async () => {
|
||||
for (let bytes = 1; bytes <= 32; ++bytes) {
|
||||
for (let i = 0; i < 5; ++i) {
|
||||
const a = randomBytes(bytes);
|
||||
const b = randomBytes(bytes);
|
||||
expect(await poseidonCircom["poseidon(uint256[2])"]([a, b])).to.eq(
|
||||
await poseidon["poseidon(uint256[2],uint256)"]([a, b], 0)
|
||||
);
|
||||
expect(await poseidonCircom["poseidon(uint256[2])"]([a, 0])).to.eq(
|
||||
await poseidon["poseidon(uint256[2],uint256)"]([a, 0], 0)
|
||||
);
|
||||
expect(await poseidonCircom["poseidon(uint256[2])"]([0, b])).to.eq(
|
||||
await poseidon["poseidon(uint256[2],uint256)"]([0, b], 0)
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// test against with scroll's go implementation.
|
||||
context("domain = nonzero", async () => {
|
||||
let poseidonCircom: Contract;
|
||||
let poseidon: Contract;
|
||||
|
||||
beforeEach(async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
|
||||
const PoseidonWithoutDomainFactory = new ethers.ContractFactory(
|
||||
PoseidonWithoutDomain.generateABI(2),
|
||||
PoseidonWithoutDomain.createCode(2),
|
||||
deployer
|
||||
);
|
||||
poseidonCircom = await PoseidonWithoutDomainFactory.deploy();
|
||||
await poseidonCircom.deployed();
|
||||
|
||||
const PoseidonWithDomainFactory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
|
||||
poseidon = await PoseidonWithDomainFactory.deploy();
|
||||
await poseidon.deployed();
|
||||
});
|
||||
|
||||
it("should succeed on zero inputs", async () => {
|
||||
expect(await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 6)).to.eq(
|
||||
BigNumber.from("17848312925884193353134534408113064827548730776291701343555436351962284922129")
|
||||
);
|
||||
expect(await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 7)).to.eq(
|
||||
BigNumber.from("20994231331856095272861976502721128670019193481895476667943874333621461724676")
|
||||
);
|
||||
});
|
||||
|
||||
it("should succeed on random inputs", async () => {
|
||||
const lines = String(fs.readFileSync("./integration-test/testdata/poseidon_hash_with_domain.data")).split("\n");
|
||||
for (const line of lines) {
|
||||
const [domain, a, b, hash] = line.split(" ");
|
||||
expect(await poseidon["poseidon(uint256[2],uint256)"]([a, b], domain)).to.eq(BigNumber.from(hash));
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -5,7 +5,9 @@ import { concat } from "ethers/lib/utils";
|
||||
import { ethers } from "hardhat";
|
||||
import { MockZkTrieVerifier } from "../typechain";
|
||||
|
||||
import poseidonUnit from "circomlib/src/poseidon_gencontract";
|
||||
import { generateABI, createCode } from "../scripts/poseidon";
|
||||
|
||||
const chars = "0123456789abcdef";
|
||||
|
||||
interface ITestConfig {
|
||||
block: number;
|
||||
@@ -20,170 +22,245 @@ interface ITestConfig {
|
||||
|
||||
const testcases: Array<ITestConfig> = [
|
||||
{
|
||||
block: 95216,
|
||||
desc: "contract with storage",
|
||||
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0x5300000000000000000000000000000000000004", ["0x8391082587ea494a8beba02cc40273f27e5477a967cd400736ac46950da0b378"], "0x1111ad"]}' https://rpc.scroll.io
|
||||
block: 1118637,
|
||||
desc: "WETH.balance[0xa7994f02237aed2c116a702a8f5322a1fb325b31]",
|
||||
account: "0x5300000000000000000000000000000000000004",
|
||||
storage: "0x9505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a",
|
||||
expectedRoot: "0x2dc794537b959b575dc216cd11389d802f9389fce7183278561a824aa8e950e2",
|
||||
expectedValue: "0x00000000000000000000000000000000000000000000000111346048bf18a14a",
|
||||
storage: "0x8391082587ea494a8beba02cc40273f27e5477a967cd400736ac46950da0b378",
|
||||
expectedRoot: "0x1334a21a74914182745c1f5142e70b487262096784ae7669186657462c01b103",
|
||||
expectedValue: "0x00000000000000000000000000000000000000000000000000006239b5a2c000",
|
||||
accountProof: [
|
||||
"0x001988ce414103e97cb80613adde47d5fe0611b30087d1cfcdb84284c42907467e24ac744be2edcb86cdfc42a9bbb7b2a270649161c3ce3a41d3ad5a26927d2c79",
|
||||
"0x0028db7c407cab6652f1f194401bd87bda33c9a1723b4f93515bd5929cad02668123fa5a3e69136c8e03a62c805f89c9d3578a6f5fac4bb281fc4d7df12fbcc5db",
|
||||
"0x000376d1bfe3d5c6afffb5707a34003209c57fbf15430daf0f8022b4df2bb947460ab4fda7be343efd34af2420e8e9d4268f436cb7700a005086df4eba083407c8",
|
||||
"0x0025df09dd66dd9d8b5b1abb82cee9985a2addd12e7f5671c910e27644ccaf498c2a2d7021169172e380831f43a00f0a3bef8576c7c74ac98fd7e7b1ec443ac92e",
|
||||
"0x00218d51f8e754bf89062007dd765b50b7385bbb4a57db258ac8dcf9ad69b6f4552ddc5a17cec74d8e8f06e16c0a6112023c34d6c001060bc783ab4d06a4a9801a",
|
||||
"0x001166c2eedfbbb4568ec27c57b2729437c0c8c38161fad643f03f76fbd807e712286d86bfdceb6729daedb6f219dd0f6080386d9a2a8f9c1dcb89792c8754e125",
|
||||
"0x0028fd666ed406e277f6496bcac13af8b303b58c74be937399095399f4dd141c6f2876f81684c2546ff90b221ba2fe1290e671770af08fd545868e3351401b1503",
|
||||
"0x000b9245c7ccc1eab305d40cced5e8aac6c8ddb877451075185bb7a6c1a4973a5d2852ce761c8e417a5f604a6ef4196ec101014aa1d1e4c684d1b5b8cbec5c37b1",
|
||||
"0x0019755e50ef22e13ae17cbc33d9e708ee9efc011941b3a920bc65da9825b04eb029a43488e5584b68d1a98a215f03f31e063734a3305600f9feed11607271d0d3",
|
||||
"0x002e10cc0afbf5b336e6a6eeae0c863df7a7c2ba61c599618fb973aeff397918e523b18c08a19fa6bc964ae41c56af610ab43d948db94ad2543e9807a5a0f1d2f0",
|
||||
"0x00247f3f0cebebf749e27c8ffd81e9919cab114bd3d75029e3260e99b6c7fe551d06a69531144f521b68d1a2c7450f5a20146efdaf7b47271782bb8746a023cf84",
|
||||
"0x0029ad88f0ee7198edcae37ab88efb2a27ea8956d6b988264b227843c175743c4329916ead363e6adfc27f400977d2d9efb1f896616a18d71e2702ec8201b82c57",
|
||||
"0x002a1de55ee84561850354085516a1101705f8240b8f1e1f9aea3a464650d637a52fad2de438ac5851b0e28508af90bd385dbcad5df8ea23ca78792f094ff7ca0d",
|
||||
"0x001ba118afa1977f1fda1411cd1c7f145ab97a35b1e724060d5cfc3d58b27141ee2b0a8dbf3d494c6a1bf6456c4de00de8e2f0d9be0716a3ca78d4df28948f975b",
|
||||
"0x0025bdbf508c1e3808415136bfdd6dfb548c33348d813882b0d405913405d575010c60f95c658dc8113f7c97935a35d78c23dba131c25866fc8d93920e318d2450",
|
||||
"0x0007bc3ec4d80df884c4d87f4541ffa522046a4c52e6cccb9ff7376ff56149e5d21b87a56676f679f4b8b4478c8a3aa80a09127258cccd4aa373a5c7c2344d2d03",
|
||||
"0x010aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000004000000000000000000000000",
|
||||
"0x0907d980105678a2007eb5683d850f36a9caafe6e7fd3279987d7a94a13a360d3a1478f9a4c1f8c755227ee3544929bb0d7cfa2d999a48493d048ff0250bb002ab",
|
||||
"0x092b59a024f142555555c767842c4fcc3996686c57699791fcb10013f69ffd9b2507360087cb303767fd43f2650960621246a8d205d086e03d9c1626e4aaa5b143",
|
||||
"0x091f876342916ac1d5a14ef40cfc5644452170b16d1b045877f303cd52322ba1e00ba09f36443c2a63fbd7ff8feeb2c84e99fde6db08fd8e4c67ad061c482ff276",
|
||||
"0x09277b3069a4b944a45df222366aae727ec64efaf0a8ecb000645d0eea3a3fa93609b925158cc04f610f8c616369094683ca7a86239f49e97852aa286d148a3913",
|
||||
"0x092fb789200a7324067934da8be91c48f86c4e6f35fed6d1ce8ae4d7051f480bc0074019222c788b139b6919dfbc9d0b51f274e0ed3ea03553b8db30392ac05ce4",
|
||||
"0x092f79da8f9f2c3a3a3813580ff18d4619b95f54026b2f16ccbcca684d5e25e1f52912fa319d9a7ba537a52cc6571844b4d1aa99b8a78cea6f686a6279ade5dcae",
|
||||
"0x09249d249bcf92a369bd7715ec63a4b29d706a5dbb304efd678a2e5d7982e7fa9b202e3225c1031d83ab62d78516a4cbdbf2b22842c57182e7cb0dbb4303ac38c5",
|
||||
"0x0904837ebb85ceccab225d4d826fe57edca4b00862199b91082f65dfffa7669b90039c710273b02e60c2e74eb8b243721e852e0e56fa51668b6362fd920f817cb7",
|
||||
"0x090a36f6aabc3768a05dd8f93667a0eb2e5b63d94b5ce27132fb38d13c56d49cb4249c2013daee90184ae285226271f150f6a8f74f2c85dbd0721c5f583e620b10",
|
||||
"0x091b82f139a06af573e871fdd5f5ac18f17c568ffe1c9e271505b371ad7f0603e716b187804a49d2456a0baa7c2317c14d9aa7e58ad64df38bc6c1c7b86b072333",
|
||||
"0x0929668e59dfc2e2aef10194f5d287d8396e1a897d68f106bdb12b9541c0bab71d2bf910dea11e3209b3feff88d630af46006e402e935bc84c559694d88c117733",
|
||||
"0x0914231c92f09f56628c10603dc2d2120d9d11b27fa23753a14171127c3a1ee3dd0d6b9cbd11d031fe6e1b650023edc58aa580fa4f4aa1b30bf82e0e4c7a308bb9",
|
||||
"0x0914c1dd24c520d96aac93b7ef3062526067f1b15a080c482abf449d3c2cde781b195eb63b5e328572090319310914d81b2ca8350b6e15dc9d13e878f8c28c9d52",
|
||||
"0x0927cb93e3d9c144a5a3653c5cf2ed5940d64f461dd588cd192516ae7d855e9408166e85986d4c9836cd6cd822174ba9db9c7a043d73e86b5b2cfc0a2e082894c3",
|
||||
"0x090858bf8a0119626fe9339bd92116a070ba1a66423b0f7d3f4666b6851fdea01400f7f51eb22df168c41162d7f18f9d97155d87da523b05a1dde54e7a30a98c31",
|
||||
"0x0902776c1f5f93a95baea2e209ddb4a5e49dd1112a7f7d755a45addffe4a233dad0d8cc62b957d9b254fdc8199c720fcf8d5c65d14899911e991b4530710aca75e",
|
||||
"0x091d7fde5c78c88bbf6082a20a185cde96a203ea0d29c829c1ab9322fc3ca0ae3100ef7cba868cac216d365a0232ad6227ab1ef3290166bc6c19b719b79dbc17fc",
|
||||
"0x091690160269c53c6b74337a00d02cb40a88ea5eba06e1942088b619baee83279e12d96d62dda9c4b5897d58fea40b5825d87a5526dec37361ec7c93a3256ea76d",
|
||||
"0x091bccb091cde3f8ca7cfda1df379c9bfa412908c41037ae4ec0a20ce984e2c9a51d02c109d2e6e25dc60f10b1bc3b3f97ca1ce1aa025ce4f3146de3979403b99e",
|
||||
"0x0927083540af95e57acba69671a4a596f721432549b8760941f4251e0dd7a013a917cee0f60d333cf88e40ae8710fb1fd6e3920346a376b3ba6686a4b2020a043e",
|
||||
"0x082170b57b8f05f6990eec62e74cdb303741f6c464a85d68582c19c51e53f490000a5029a62ddc14c9c07c549db300bd308b6367454966c94b8526f4ceed5693b2",
|
||||
"0x0827a0b16ef333dcfe00610d19dc468b9e856f544c9b5e9b046357e0a38aedaeb90000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x06126f891e8753e67c5cbfa2a67e9d71942eab3a88cde86e97a4af94ea0dde497821fb69ccdb00e6eaeaf7fc1e73630f39f846970b72ac801e396da0033fb0c247",
|
||||
"0x0420e9fb498ff9c35246d527da24aa1710d2cc9b055ecf9a95a8a2a11d3d836cdf050800000000000000000000000000000000000000000000000016ef00000000000000000000000000000000000000000000000000000000000000600058d1a5ce14104d0dedcaecaab39b6e22c2608e40af67a71908e6e97bbf4a43c59c4537140c25a9e8c4073351c26b9831c1e5af153b9be4713a4af9edfdf32b58077b735e120f14136a7980da529d9e8d3a71433fc9dc5aa8c01e3a4eb60cb3a4f9cf9ca5c8e0be205300000000000000000000000000000000000004000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
storageProof: [
|
||||
"0x000a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145b",
|
||||
"0x001684ff1ef6ea054c5a6a5cae45f9280dacfc10c6cde39d1f64a00ad3c77549fe1c14ff8a628c0244ba48d63610e5d0b514c1b7b60301b6f27f77a435caf8bd60",
|
||||
"0x001a2ba0ad7d6447d3c2476aa2e6bd04ab552ac1840450ce11f338f58a80fcdf420df4b9fc89108a0a44d844d981abe44d5ab20a5a101d07e94d131f07bf83ba62",
|
||||
"0x0007158ec8942174c68bde0ab3666eb29b3c5784693bbfcd21126789d98bbdd05409f0313df8ddc438abe4798854f30c9daa2274950ce833a2de21e09b8b2c11b2",
|
||||
"0x000ab27b84c73e447618f030ad9d621b0d61cc783e7ae5671ffcd3ff479b5093fe173d6126fa71986aa679b5384a2dc25f3a15f806a546e933f9fda6ac0a3460d9",
|
||||
"0x0024ca9a7c6b7bf77c7a7acdae9d8e551b08ec6adf30abb7d1c45a6bbd5058ea921802170d5cc7de7d294cf6c67b0ac0208fe76497803554fb5bba9f78721568eb",
|
||||
"0x0018a60c68b26022ced26cce2be1af1d6b33f4c16596d1ba18d5f47fea98ae490b12e66678391e289de1cf981c122e765265b94f0669614d94847480a77c2d3b74",
|
||||
"0x001a776d5e5902c9a073c86a71ee80d167d6e2eb92150df2afb3d87f18b2cce6f02af158ba1cfbc643b36c1e001b59473cc88663b44c8d70739a27b804ec387146",
|
||||
"0x0012cd2c1070b0d2eb215eb760fba9b843bd5c732102ce9773701076b0e37a437e136901c4ddc1cdbef42f46af629296ca5965b41a53cce65237612cea27477076",
|
||||
"0x002bf94aa1fcb474365039e949bbbeabe0162ffc490b1b63ffe0f84bf182a8bf16169fe345e742d176a80f6e733177736d93e40fc9fdd4866efa6cc45ad94e9577",
|
||||
"0x001a2e6e1b585fa0564fc606c3d62c26d9a113d75430966ff3f500e450c762edeb24fb1e5456ed4313d9418a1b073ae8b3f852f0f8435752bbbe65d21726ddb873",
|
||||
"0x002529704fb28f7d3f9d2f3e9d38b000b6bfc2a21cb0a1955797016536066307d70ba7397326ecf50b98153f9e3baa96608efdf7d772b1ff28649bef677860dba9",
|
||||
"0x0022f4f22a1d85ac83a56e7031559cf874c78a2f2ee6b6b93625f588313964a6d0052f6c873c6417d409c2a5317b31449b36fb4faede558d03b448b06b4a198daa",
|
||||
"0x0017167b295954b29f62d7347dab3158aedc8586d5aa233d3f69c14bc7fe31eb840000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x002d7bed0c0f0318a6fc60f903f4a42841cc4fa431ddf1a97fc34f35d6a267434b2a1a818d75328089a9578143e31b1f535517e09ff50a728b100483e712c8bc9a",
|
||||
"0x0126ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a",
|
||||
"0x09240ea2601c34d792a0a5a8a84d8e501cfdfdf2c10ef13ea560acac58661882dd1b3644d1d4f3e32fc78498a7ebeffac8c6a494ac6f36923ef1be476444c0d564",
|
||||
"0x0912af3ac8f8ea443e6d89d071fccaa2b3c8462220c1c2921234f613b41594f08f2a170e61f5f436b536c155b438044cf0d0f24b94b4c034ad22b3eae824998243",
|
||||
"0x0916011d547d7a54929c3515078f4f672c6b390ccdd4119f0776376910bc5a38da1a059ed9c504fadcc9f77e8a402175743bee1f5be27b7002b0f6c5b51070452c",
|
||||
"0x09017285edc268d979eb410b46627e541afda16cdb3577ce04c15dc14cc6609c60143f0c01e71e99b2efbe3d8e62a2c812889aa9fd88dd4b0ed8eadcf1ec9b096a",
|
||||
"0x0922901e65200b007ad8e1b972e90403b336e459e0cf9b9d68732da345b1b0f6872c9e3f3edacbd857b26d0a66a80aa56c6ebaa9849e9ea5a2b17fd59cabe138e4",
|
||||
"0x091b77a00164a72880eec6c18fc043fa99f922e20bbee156e1ebfd3a358bee6bbb24d97cfaa234befe197a567476cade91b7d97a1017b8d5286dae4dddadffe1cd",
|
||||
"0x09216f1c4d67a9a428885bb8d978ad369d2d69d4dcc1692c3a0c3ea05da7d6f0ac2d6dda722e76eb513c67718e7be0478851758be5547322473a53b5b2b67faf95",
|
||||
"0x091f56c6f18ceb7077125df1ed17a42a85956090594125c1b182161de20f8af6aa2e36977412f9ea2ad2c0951153969eca8408317558ff1b6b4ad731726235f606",
|
||||
"0x092ca197dda6c519d80296f4fcda2933df9608ec684ad000133259024041d070812d29b058a998cf7ffc647b2739041725d77889f58953799c6aba6d9e5b981fc8",
|
||||
"0x091c25a87d321a09ad2a149d1a7eaa77727c7feffb4c39caf44f8edd4377f7bd0c16d1091494d3c90d301c1cb4596692798e78e4cc3d53c3a08e2641de43f9da18",
|
||||
"0x092166058c98245eb85b08da1c569df11f86b00cc44212a9a8ee0d60556d05a8030942c68b535651e11af38264ecc89e5f79b66c3d9ce87233ad65d4894a3d1c3d",
|
||||
"0x0908c3b13b7400630170baec7448c7ec99fa9100cad373e189e42aca121e2c8f450f9e40d92d98bb0b1286a18581591fddfa8637fc941c1630237293d69e5cb98f",
|
||||
"0x091362d251bbd8b255d63cd91bcfc257b8fb3ea608ce652784e3db11b22ca86c0122a0068fa1f1d54f313bed9fd9209212af3f366e4ff28092bf42c4abebffe10a",
|
||||
"0x081d67961bb431a9da78eb976fabd641e20fbf4b7e32eb3faac7dfb5abb50f1faf1438d77000c1cf96c9d61347e1351eb0200260ebe523e69f6e9f334ec86e6b58",
|
||||
"0x0819324d2488778bdef23319a6832001ee85f578cc920670c81f3645f898a46ec62e00385c4416ca4ccbab237b13396e5e25e5da12101021c6a6f9ecfe7c7fed19",
|
||||
"0x041421380c36ea8ef65a9bdb0202b06d1e03f52857cdfea3795463653eaa3dd7d80101000000000000000000000000000000000000000000000000000000006239b5a2c000208391082587ea494a8beba02cc40273f27e5477a967cd400736ac46950da0b378",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
},
|
||||
{
|
||||
block: 95216,
|
||||
desc: "contract with empty storage node",
|
||||
account: "0xb75d7e84517e1504c151b270255b087fd746d34c",
|
||||
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0x5300000000000000000000000000000000000004", ["0x0000000000000000000000000000000000000000000000000000000000000002"], "0x1111ad"]}' https://rpc.scroll.io
|
||||
block: 1118637,
|
||||
desc: "WETH.totalSupply",
|
||||
account: "0x5300000000000000000000000000000000000004",
|
||||
storage: "0x0000000000000000000000000000000000000000000000000000000000000002",
|
||||
expectedRoot: "0x2dc794537b959b575dc216cd11389d802f9389fce7183278561a824aa8e950e2",
|
||||
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
expectedRoot: "0x1334a21a74914182745c1f5142e70b487262096784ae7669186657462c01b103",
|
||||
expectedValue: "0x0000000000000000000000000000000000000000000000600058d1a5ce14104d",
|
||||
accountProof: [
|
||||
"0x001988ce414103e97cb80613adde47d5fe0611b30087d1cfcdb84284c42907467e24ac744be2edcb86cdfc42a9bbb7b2a270649161c3ce3a41d3ad5a26927d2c79",
|
||||
"0x0028db7c407cab6652f1f194401bd87bda33c9a1723b4f93515bd5929cad02668123fa5a3e69136c8e03a62c805f89c9d3578a6f5fac4bb281fc4d7df12fbcc5db",
|
||||
"0x0006801926f00b574e3a88162d192482fecba9918b77e133dd77587d9efaf5c7861712d244ac8ad4bc0bffe0dbe8ab261865c9a69b4b7769e9c188ec048460ce78",
|
||||
"0x002f3161746c2c70c7cefb74c07bc16b28bd9011343f5c6f8756471cd0b184601a25d05d5447a572452964b3c20f40ef841bf313c958e82a6923584e20496df67f",
|
||||
"0x000efef3e3e174da6f3f451b5a2652d2489fff449a217c10841e68e4a15995d6521c4b1552c592020fbc7219c5d67ff00bd630db8102ce5c6ca12bea29b80ba5e5",
|
||||
"0x0019b4749b17792c0ad9f7b460a0faf35400da9423be38ac5c40e81c805acc72592c0b933e1c25d05db98d98fc4f04b27610b2ee88281126099aed42f27cd96b00",
|
||||
"0x002b8d563c5041f28afa38da01b6ec9e7278250be79f7f55e2586955e75ab75fad2055ea72cd44209c41c94ddfb980fe5b007b3e997085bc1fe5b514f72f860c05",
|
||||
"0x001335698617876fcc272740f765d53d53ee511dc9dc33965aaa0a5584f2f0fc02274c435ba9cc0fd5b897350de8cc1837d3a2baaa54ef3f9c66f689f20eddaf1a",
|
||||
"0x0010f766b8dbe13e3f27f45da3ad7e5c31fd1c11c51f4f892851519182cdc9348921c10d83a16e057f99623dcd68ab28a78e48b655df756245631521d04e85e583",
|
||||
"0x002bb5fce9df47073438d61ee438d900ab4ab01ac7f053e57c6ffe3e8f1746285016a600e6b7ee90281bbc3bd9b9523a663261cda2208ae98efcf76df8c965fb76",
|
||||
"0x002cad2eb5194b59d880565b03cd667a842923c1310a60bd818685c8fe4120d86817ee8bfffdb490f78f23d6fb38bb1c27f10f877c5017b8b2c21ad14f23df0eab",
|
||||
"0x001f064044ca94d6f30ef93ee1bb6ae35450acf1c8f5b113b0f0ff39e4b65cfb9a25141ae7fc30c69000991e65c626c1b12fb76bca02c68f8116d15698a5934b71",
|
||||
"0x0014382fa3481f424cc33c39f77fd3df54c5951be347c629ab5baec238e46cab050b2b8bec8ebdbc97dd6c0ab867aae5746e51b69b7b8177c96dbc0c4531521d3e",
|
||||
"0x0011941db7a46d1a3ddbd27a4a57a0ce1865b6e224552b233d9d545745489257f408c8e3a0a147e117dbb89827018a2df52d124cee29e82b15643e4877cabe4d06",
|
||||
"0x0000d7b8f99e5f148297bf4bf7e5133f87dbdf1932dbb152e0cb14c472c7d26f26146c4f72b903bb98b0855c1ca5bef4bada14a773dcda341d10402004e999d757",
|
||||
"0x0104eeb1fce36df4d3f6423137af3855d16bc936184295529c58682bb5217d64d905080000000000000000000000000000000000000000000000000867000000000000000130644e72e131a029b85045b68181585d2833e84879b96ea2850beb8e012d423615fd9926356a5b1f3a4599c7cccd6df3b45097b6527756e572b90fc8c40496f831f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb8351cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c220b75d7e84517e1504c151b270255b087fd746d34c000000000000000000000000",
|
||||
"0x0907d980105678a2007eb5683d850f36a9caafe6e7fd3279987d7a94a13a360d3a1478f9a4c1f8c755227ee3544929bb0d7cfa2d999a48493d048ff0250bb002ab",
|
||||
"0x092b59a024f142555555c767842c4fcc3996686c57699791fcb10013f69ffd9b2507360087cb303767fd43f2650960621246a8d205d086e03d9c1626e4aaa5b143",
|
||||
"0x091f876342916ac1d5a14ef40cfc5644452170b16d1b045877f303cd52322ba1e00ba09f36443c2a63fbd7ff8feeb2c84e99fde6db08fd8e4c67ad061c482ff276",
|
||||
"0x09277b3069a4b944a45df222366aae727ec64efaf0a8ecb000645d0eea3a3fa93609b925158cc04f610f8c616369094683ca7a86239f49e97852aa286d148a3913",
|
||||
"0x092fb789200a7324067934da8be91c48f86c4e6f35fed6d1ce8ae4d7051f480bc0074019222c788b139b6919dfbc9d0b51f274e0ed3ea03553b8db30392ac05ce4",
|
||||
"0x092f79da8f9f2c3a3a3813580ff18d4619b95f54026b2f16ccbcca684d5e25e1f52912fa319d9a7ba537a52cc6571844b4d1aa99b8a78cea6f686a6279ade5dcae",
|
||||
"0x09249d249bcf92a369bd7715ec63a4b29d706a5dbb304efd678a2e5d7982e7fa9b202e3225c1031d83ab62d78516a4cbdbf2b22842c57182e7cb0dbb4303ac38c5",
|
||||
"0x0904837ebb85ceccab225d4d826fe57edca4b00862199b91082f65dfffa7669b90039c710273b02e60c2e74eb8b243721e852e0e56fa51668b6362fd920f817cb7",
|
||||
"0x090a36f6aabc3768a05dd8f93667a0eb2e5b63d94b5ce27132fb38d13c56d49cb4249c2013daee90184ae285226271f150f6a8f74f2c85dbd0721c5f583e620b10",
|
||||
"0x091b82f139a06af573e871fdd5f5ac18f17c568ffe1c9e271505b371ad7f0603e716b187804a49d2456a0baa7c2317c14d9aa7e58ad64df38bc6c1c7b86b072333",
|
||||
"0x0929668e59dfc2e2aef10194f5d287d8396e1a897d68f106bdb12b9541c0bab71d2bf910dea11e3209b3feff88d630af46006e402e935bc84c559694d88c117733",
|
||||
"0x0914231c92f09f56628c10603dc2d2120d9d11b27fa23753a14171127c3a1ee3dd0d6b9cbd11d031fe6e1b650023edc58aa580fa4f4aa1b30bf82e0e4c7a308bb9",
|
||||
"0x0914c1dd24c520d96aac93b7ef3062526067f1b15a080c482abf449d3c2cde781b195eb63b5e328572090319310914d81b2ca8350b6e15dc9d13e878f8c28c9d52",
|
||||
"0x0927cb93e3d9c144a5a3653c5cf2ed5940d64f461dd588cd192516ae7d855e9408166e85986d4c9836cd6cd822174ba9db9c7a043d73e86b5b2cfc0a2e082894c3",
|
||||
"0x090858bf8a0119626fe9339bd92116a070ba1a66423b0f7d3f4666b6851fdea01400f7f51eb22df168c41162d7f18f9d97155d87da523b05a1dde54e7a30a98c31",
|
||||
"0x0902776c1f5f93a95baea2e209ddb4a5e49dd1112a7f7d755a45addffe4a233dad0d8cc62b957d9b254fdc8199c720fcf8d5c65d14899911e991b4530710aca75e",
|
||||
"0x091d7fde5c78c88bbf6082a20a185cde96a203ea0d29c829c1ab9322fc3ca0ae3100ef7cba868cac216d365a0232ad6227ab1ef3290166bc6c19b719b79dbc17fc",
|
||||
"0x091690160269c53c6b74337a00d02cb40a88ea5eba06e1942088b619baee83279e12d96d62dda9c4b5897d58fea40b5825d87a5526dec37361ec7c93a3256ea76d",
|
||||
"0x091bccb091cde3f8ca7cfda1df379c9bfa412908c41037ae4ec0a20ce984e2c9a51d02c109d2e6e25dc60f10b1bc3b3f97ca1ce1aa025ce4f3146de3979403b99e",
|
||||
"0x0927083540af95e57acba69671a4a596f721432549b8760941f4251e0dd7a013a917cee0f60d333cf88e40ae8710fb1fd6e3920346a376b3ba6686a4b2020a043e",
|
||||
"0x082170b57b8f05f6990eec62e74cdb303741f6c464a85d68582c19c51e53f490000a5029a62ddc14c9c07c549db300bd308b6367454966c94b8526f4ceed5693b2",
|
||||
"0x0827a0b16ef333dcfe00610d19dc468b9e856f544c9b5e9b046357e0a38aedaeb90000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x06126f891e8753e67c5cbfa2a67e9d71942eab3a88cde86e97a4af94ea0dde497821fb69ccdb00e6eaeaf7fc1e73630f39f846970b72ac801e396da0033fb0c247",
|
||||
"0x0420e9fb498ff9c35246d527da24aa1710d2cc9b055ecf9a95a8a2a11d3d836cdf050800000000000000000000000000000000000000000000000016ef00000000000000000000000000000000000000000000000000000000000000600058d1a5ce14104d0dedcaecaab39b6e22c2608e40af67a71908e6e97bbf4a43c59c4537140c25a9e8c4073351c26b9831c1e5af153b9be4713a4af9edfdf32b58077b735e120f14136a7980da529d9e8d3a71433fc9dc5aa8c01e3a4eb60cb3a4f9cf9ca5c8e0be205300000000000000000000000000000000000004000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
storageProof: [
|
||||
"0x000c180cb3d57f72eb405dfc667d167967e4709cf3722a87b4c924f78a1d8fa9e926d16eb1f4902f8ac7a48fdf98274c9c4061f9f14f783e2fb41ef50c53d5f8ad",
|
||||
"0x000f78c968ee196c478c91d12a48edfde6c630d40203652c6420ff5aa3619549a4297615606d62866169d509f77c9cb38751ae282cafdc27caf891585b383b4795",
|
||||
"0x000798716960783afdcfd0749aa3b316d6e3d6ec2724853e629b42b5a9a10208e02e5f5fe3d5b8b823d3481aa1e738a1a24d6d1a63116e0003044672d73a7df2e4",
|
||||
"0x0014748f61c4954d239225204b4611d66384f08ef03db3da82957fd590ee00b6c92b873e4bd217f8dfb0fa29bca1087ac7bc29db616a6830ba456091bab772ac06",
|
||||
"0x000a1c900952239e98f5f1a3009e623bf6cf533d3b0d6d13d28d04f0496761927c0be199ff86f081ebb1c413e850450a4cce01dfd2c455156d7abde31385ae2ab8",
|
||||
"0x00028d4e89bc6ce55b5e6bba0f2f3758dafcdb4722e6c1a06f6faa8bae065bc8ae0644641c0ac696c265b3ec90889e3842c9a7a5902f1a5e807c5767ed49106982",
|
||||
"0x001e8434bf68ee6077d88efb5449ad286455a522e63a6bce5544cf785b77a5842d041a4e324bc47aa8ae42b56446f687758a8091986b6d760fd283a9e097a64e3a",
|
||||
"0x00250bc6ba916a2acb3ce53053a88be40b815fa749d144dc709a7a46a08361e83c05b2b5b05f45324ab921e04ae1278371ebe1e092203259f4e5306eb46ad50f8c",
|
||||
"0x0011c208e2c536c37674b1ecafff0261146c326c939544781da7062bbd0ac2fbca246f5225dc41e9fc17fe531f5bdc3325620e4003b3310a2cf7e31011b19c68a2",
|
||||
"0x001dc8d4177945ac89a3c61977ed787e50c9d8a0c5d85dd6b1409ec11213b324e6228005b222573db7882205be776a5bd2183944b6fcf63af604e31b9285bd010e",
|
||||
"0x0014ba74da33d2ca27e3f78bc1bd052c2b92176ce4136df751a8229051de383c2b0c8994f02704420f1f84963281364401d00f6d5aa9b6f52135bd96159c1c3b9b",
|
||||
"0x00188c7ee45a6c28fa7ad49a86206b70764066b1888b0de90e4410d7132a641f8b0eecbba072e28ed6705379104e30dd2557c47b30be7dd5e8c893b8a641d02701",
|
||||
"0x0010fb29a3bb8191eb03bd345ad1995bf6a57f09929f72dc8a9c42435c2eef734b1d565bfc8ae78d6c1496f2bdfeadff6890e8ddef4c6b730a5ec8575344800c90",
|
||||
"0x001b2abe5a1352c492c3ac47d2ff93896977a99a0783eedadc6246efc9b4e78ab408291f4e9234e4662a365f40090e1b323e3448fa2f6cdc9c929477095499c323",
|
||||
"0x00083b5711eb1cbba5e79c53227057d4987a22dd22b5ef715bf21f558917f48b17027f174fd4ca77e412ca65a7fbf6151e4473fa909ea384c7687b45f860d0103a",
|
||||
"0x00100158ee54f61ba5b093a43a348cfd202c87ba1533af2b24fc2f068de89a8d15100f3cc72c206d05d44db4272bd67db89bc6e5c86d7c1b03b40395ec4661595c",
|
||||
"0x002a15c17fcf2a10c6d1bcbd59ae262f80ad33518d499059a668e115045069ef012788a404ba41b5f8a96f0b294d0ba91e65b1bf58eee74adb8e55ca12f22fdccc",
|
||||
"0x00031177585837e616bc830056a4bd12821c9c779096df361ebe1d77379e96ff9e0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x02",
|
||||
"0x09240ea2601c34d792a0a5a8a84d8e501cfdfdf2c10ef13ea560acac58661882dd1b3644d1d4f3e32fc78498a7ebeffac8c6a494ac6f36923ef1be476444c0d564",
|
||||
"0x0912af3ac8f8ea443e6d89d071fccaa2b3c8462220c1c2921234f613b41594f08f2a170e61f5f436b536c155b438044cf0d0f24b94b4c034ad22b3eae824998243",
|
||||
"0x0916011d547d7a54929c3515078f4f672c6b390ccdd4119f0776376910bc5a38da1a059ed9c504fadcc9f77e8a402175743bee1f5be27b7002b0f6c5b51070452c",
|
||||
"0x092293af71b7b9315c32d08f06e291b85e3b3dbba786dd31952369f666281aa21125ab35feae70aaca9349f6af48f7dcf2dee0324e4eae03e929963e7728b633a3",
|
||||
"0x090607033a4b976c1e4683298d66b88a95ed45033ff43dea0670d84a8c42d35bf12562869385c0e70f561f18be4b78e7276b837f140a45ab12ffef1ba4ad5faecb",
|
||||
"0x090abc5f713c2f58583114bb5081d00cbd01789d8efbd95e471b151c71c475142f0f52ad30f8a63288eb9dd12aca2a670de08c03f8384f55d730c943e1c472625b",
|
||||
"0x0905156e8704d6195f6ae562aed2072f4e32422c6dfd4840ca354b9c4d2de5ce760fca52b1e0689ad374bae9fbea262a929f919695149a083fe6bacb806dc02fca",
|
||||
"0x0917078d4c193a3fdbfe8ce3a235a0e1df89e626b5e91636097e299883fc2447892ad46eefbb27909544fe02c05e29760315749f6ce21c17c52158f5f5616c2dad",
|
||||
"0x0917d02e5da8bdb969149c9327b247a6aaa479bcda4a03665da5103c10e616d2f40ccabdacdd25b34235d26e50e7af5d8d312a2cafdcadd41cc589a71a322f254c",
|
||||
"0x090c62f5c476c1def8ed8a8c25ae54581690b39dfab4b0f3f78b93df96f626714328ea922a76a058087563bb5370664e9a1cebe3062f2d904bf5e3a018219d6563",
|
||||
"0x091e481971f770e587b1f62f1da9ac4687abc5b2a23097fc38332e15ab957ca0ab0ec0a95c15313887e0d2f166c100deaf17f2ce50767680e6e5b2e3068801c0cd",
|
||||
"0x0911799e186f1bd299dfa08c07404b9d28e2b179fb6ad523f1846872537b6db85f198b573ac1397048258de38b391fcc5e0c86a0f81f4ca607785fb37041ab8b4d",
|
||||
"0x092053a028cf3bfcdabcb58985efc39f078cb0bcae4439528a0b6fe4b24bbdbd2c019a04a54e9e96077f3c2c39c1602a83387018b6357ea4c28e96764865d1c8f3",
|
||||
"0x07303fad3e4628ccae4de1adb41996c9f38b22445b6525ff163b4c68cbde275b1a06111cae9b4d17b730d94f589e20c6ae2cb59bf0b40ad05bf58703ee6d46eac4",
|
||||
"0x0606bc3fca1f1b3c877aa01a765c18db8b0d7f0bc50bd99f21223055bf1595c84d04fdc0fd416d8402fde743d908d032a20af6f2e65cdc6cc289f72c04f1c2476f",
|
||||
"0x04020953ad52de135367a1ba2629636216ed5174cce5629d11b5d97fe733f07dcc010100000000000000000000000000000000000000000000000000600058d1a5ce14104d200000000000000000000000000000000000000000000000000000000000000002",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
},
|
||||
{
|
||||
block: 95216,
|
||||
desc: "contract with no storage",
|
||||
account: "0x9c0fc47d9346e2be1e24f6cef76149779fe52715",
|
||||
storage: "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
expectedRoot: "0x2dc794537b959b575dc216cd11389d802f9389fce7183278561a824aa8e950e2",
|
||||
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0x5300000000000000000000000000000000000004", ["0x0000000000000000000000000000000000000000000000000000000000002222"], "0x1111ad"]}' https://rpc.scroll.io
|
||||
block: 1118637,
|
||||
desc: "random empty storage in WETH",
|
||||
account: "0x5300000000000000000000000000000000000004",
|
||||
storage: "0x0000000000000000000000000000000000000000000000000000000000002222",
|
||||
expectedRoot: "0x1334a21a74914182745c1f5142e70b487262096784ae7669186657462c01b103",
|
||||
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
accountProof: [
|
||||
"0x001988ce414103e97cb80613adde47d5fe0611b30087d1cfcdb84284c42907467e24ac744be2edcb86cdfc42a9bbb7b2a270649161c3ce3a41d3ad5a26927d2c79",
|
||||
"0x0007af09eec4d7cc8903e99bd9fb9b8e57c30b0c3e34b17da769b01a9a1b943f391c4537228dbbfd7e7cce02123416bfdd61fb83577725516123b569eafcd8087d",
|
||||
"0x0013a22efa6a843f6de1925fce2f6a83c7ed307182b16f661d0e7a8046561999393050830a440d2506adf42ccedece4e3aadc6bc80cea20fc1d8ed9e9c61597da0",
|
||||
"0x001056a19427eac81b91de5db696812b3a0384bf41b37a12e9cbb7dc62404a102a1465c13c8d3721e137a64d9e5ba1267ac418339b3648bfab5a2a86f2343c2b4d",
|
||||
"0x000794d2c0e19bc86772c2d1a43d46de87ad221847bddcfdffa19dbd34f3c3a9b507c5f198eb63c18640af5eff5480830147639cec070d276b778f21677d22ce32",
|
||||
"0x000b23d93f98ec6e3536ffcab6afc6e2eb9b73aeb573d288723350366c05469e2e23837ffea9235351ee533af680d14011825e098f81ce3f8f59e9f08deff05e3d",
|
||||
"0x002ad200ac8be8275ef12b8aeaec526d2b5255128968a2cd2ff775cab14e2ec4e907f2e9b849239e0e94332a50ac9d97320c56ca718c5e023cacd69f80b4c97c86",
|
||||
"0x00284be135a2d7f5822a7949189b90696df39b1b183206c764576bf457df4fd1560204a9fc6c0dc199eecb404acfcabf4a633916fc94d2790dcd34959809c2195d",
|
||||
"0x00270c2cd154aea3b575a1c7d47c62576bbdce6bbc7ccf5682e7962cf6cb77f0d317fdbac10917644860584c3057c750df695f529189f90910c30f114257719990",
|
||||
"0x00174956df87889921e2a6ddb257fa84508fd7ea22c5a622b84378678e781a2289053dc6b3c4f91335b64f4b170bfe70bb5e2e316227b329d2b1205e7c62c4f755",
|
||||
"0x002f9284ded18b8f281841094a93cb55b95884eec55d8eaa759c6175ddb2e037111c63bcee8ccf544fff55c3e502270e574d1f0b6265c4c7c6f42db5061b0120db",
|
||||
"0x00065fdf05e66407d26a36a49d042c9c5e8cebab3baa2d3fd1ae6e673c3636cf7e2d9dbf3781e3f26f06fb503638a8bf00882f58dc83500338df4b7e08a290a5fb",
|
||||
"0x00138987046c770f02f5d8e7d073f6c055536450fa55ccd2a23957598b6070297926f3a0b645072c5bd5c15cdcf03a4474e94d760e3a76fb8714b20b9d74608823",
|
||||
"0x00280e7f8e278e02e43843aaba5a9a722a89af0ece06b5892284f825974e1c1984185be1fda9b5322a4c41023127eee438849ea23390e6c2d4d9abdedb5a1a43fc",
|
||||
"0x00208f32072c6e20863710406ad34339da1124c639941e935818dd9ad9419849c91e0e37873df7eb190a2846789df889bbfd522200e2a41423ff9ab0acf2592be0",
|
||||
"0x0005fb23491fabbc9b3eead71117b86a27952e8fd4b3380336ac3f479832e94bad109a1b6dca757696b8831d2529ffda29f37af36f92fec738376df77561491083",
|
||||
"0x0028baee42b4a9a70b7ec1e50ea1a6817f812082a28598dca106aaecf2761fb63c06e5b589490c27f5cfc233890456ec47a7365ff2882a27c73968f4829d011b05",
|
||||
"0x001708247f7a96b84cad27c31985cd39b6cc9435b4ec3f4db9aeed3311c213de651e2f271ae0fa011e5e6fccd121492400327efb915c95d85956a9cd27ceb4321a",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000002332e856217b3bab09901f1daa9ddc91edf56964e03675d260d00ffdf6e2e715",
|
||||
"0x000571dce6fee951f457db89bae18abbd78b6b06504602a103133d2a38cabf5f5b1ecb13b03e3493e217c65da70baf4c4fad74808110658924869ba0e75d0871db",
|
||||
"0x001738a6461148300d30699edb55d3b5bb62760aeb9384c07d61aa062c401f3a7d0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x000c14152707412177bbe1cfed882d7d7bdfca4e96be701a3c41bb3d254491f0bf0096ebc25015b9a40d4fe7490bda8ecb7f3a01e858d7833dce8f1993be4db07d",
|
||||
"0x0117294cb69b0984b3a26d77eae252f9d8e438808bf276ee8c0c0546b7316c9bca05080000000000000000000000000000000000000000000000000ab1000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ed3aa0dd2cd363d4cea5d5283ec359f75be36a12ceddc7f80a58af9d39a418a02b6a0ff9eb34bf0e52f67047f95556a96c4f40822412da0c8bd0340996a754f4209c0fc47d9346e2be1e24f6cef76149779fe52715000000000000000000000000",
|
||||
"0x0907d980105678a2007eb5683d850f36a9caafe6e7fd3279987d7a94a13a360d3a1478f9a4c1f8c755227ee3544929bb0d7cfa2d999a48493d048ff0250bb002ab",
|
||||
"0x092b59a024f142555555c767842c4fcc3996686c57699791fcb10013f69ffd9b2507360087cb303767fd43f2650960621246a8d205d086e03d9c1626e4aaa5b143",
|
||||
"0x091f876342916ac1d5a14ef40cfc5644452170b16d1b045877f303cd52322ba1e00ba09f36443c2a63fbd7ff8feeb2c84e99fde6db08fd8e4c67ad061c482ff276",
|
||||
"0x09277b3069a4b944a45df222366aae727ec64efaf0a8ecb000645d0eea3a3fa93609b925158cc04f610f8c616369094683ca7a86239f49e97852aa286d148a3913",
|
||||
"0x092fb789200a7324067934da8be91c48f86c4e6f35fed6d1ce8ae4d7051f480bc0074019222c788b139b6919dfbc9d0b51f274e0ed3ea03553b8db30392ac05ce4",
|
||||
"0x092f79da8f9f2c3a3a3813580ff18d4619b95f54026b2f16ccbcca684d5e25e1f52912fa319d9a7ba537a52cc6571844b4d1aa99b8a78cea6f686a6279ade5dcae",
|
||||
"0x09249d249bcf92a369bd7715ec63a4b29d706a5dbb304efd678a2e5d7982e7fa9b202e3225c1031d83ab62d78516a4cbdbf2b22842c57182e7cb0dbb4303ac38c5",
|
||||
"0x0904837ebb85ceccab225d4d826fe57edca4b00862199b91082f65dfffa7669b90039c710273b02e60c2e74eb8b243721e852e0e56fa51668b6362fd920f817cb7",
|
||||
"0x090a36f6aabc3768a05dd8f93667a0eb2e5b63d94b5ce27132fb38d13c56d49cb4249c2013daee90184ae285226271f150f6a8f74f2c85dbd0721c5f583e620b10",
|
||||
"0x091b82f139a06af573e871fdd5f5ac18f17c568ffe1c9e271505b371ad7f0603e716b187804a49d2456a0baa7c2317c14d9aa7e58ad64df38bc6c1c7b86b072333",
|
||||
"0x0929668e59dfc2e2aef10194f5d287d8396e1a897d68f106bdb12b9541c0bab71d2bf910dea11e3209b3feff88d630af46006e402e935bc84c559694d88c117733",
|
||||
"0x0914231c92f09f56628c10603dc2d2120d9d11b27fa23753a14171127c3a1ee3dd0d6b9cbd11d031fe6e1b650023edc58aa580fa4f4aa1b30bf82e0e4c7a308bb9",
|
||||
"0x0914c1dd24c520d96aac93b7ef3062526067f1b15a080c482abf449d3c2cde781b195eb63b5e328572090319310914d81b2ca8350b6e15dc9d13e878f8c28c9d52",
|
||||
"0x0927cb93e3d9c144a5a3653c5cf2ed5940d64f461dd588cd192516ae7d855e9408166e85986d4c9836cd6cd822174ba9db9c7a043d73e86b5b2cfc0a2e082894c3",
|
||||
"0x090858bf8a0119626fe9339bd92116a070ba1a66423b0f7d3f4666b6851fdea01400f7f51eb22df168c41162d7f18f9d97155d87da523b05a1dde54e7a30a98c31",
|
||||
"0x0902776c1f5f93a95baea2e209ddb4a5e49dd1112a7f7d755a45addffe4a233dad0d8cc62b957d9b254fdc8199c720fcf8d5c65d14899911e991b4530710aca75e",
|
||||
"0x091d7fde5c78c88bbf6082a20a185cde96a203ea0d29c829c1ab9322fc3ca0ae3100ef7cba868cac216d365a0232ad6227ab1ef3290166bc6c19b719b79dbc17fc",
|
||||
"0x091690160269c53c6b74337a00d02cb40a88ea5eba06e1942088b619baee83279e12d96d62dda9c4b5897d58fea40b5825d87a5526dec37361ec7c93a3256ea76d",
|
||||
"0x091bccb091cde3f8ca7cfda1df379c9bfa412908c41037ae4ec0a20ce984e2c9a51d02c109d2e6e25dc60f10b1bc3b3f97ca1ce1aa025ce4f3146de3979403b99e",
|
||||
"0x0927083540af95e57acba69671a4a596f721432549b8760941f4251e0dd7a013a917cee0f60d333cf88e40ae8710fb1fd6e3920346a376b3ba6686a4b2020a043e",
|
||||
"0x082170b57b8f05f6990eec62e74cdb303741f6c464a85d68582c19c51e53f490000a5029a62ddc14c9c07c549db300bd308b6367454966c94b8526f4ceed5693b2",
|
||||
"0x0827a0b16ef333dcfe00610d19dc468b9e856f544c9b5e9b046357e0a38aedaeb90000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x06126f891e8753e67c5cbfa2a67e9d71942eab3a88cde86e97a4af94ea0dde497821fb69ccdb00e6eaeaf7fc1e73630f39f846970b72ac801e396da0033fb0c247",
|
||||
"0x0420e9fb498ff9c35246d527da24aa1710d2cc9b055ecf9a95a8a2a11d3d836cdf050800000000000000000000000000000000000000000000000016ef00000000000000000000000000000000000000000000000000000000000000600058d1a5ce14104d0dedcaecaab39b6e22c2608e40af67a71908e6e97bbf4a43c59c4537140c25a9e8c4073351c26b9831c1e5af153b9be4713a4af9edfdf32b58077b735e120f14136a7980da529d9e8d3a71433fc9dc5aa8c01e3a4eb60cb3a4f9cf9ca5c8e0be205300000000000000000000000000000000000004000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
storageProof: [
|
||||
"0x02",
|
||||
"0x09240ea2601c34d792a0a5a8a84d8e501cfdfdf2c10ef13ea560acac58661882dd1b3644d1d4f3e32fc78498a7ebeffac8c6a494ac6f36923ef1be476444c0d564",
|
||||
"0x092fa31ba6c9b8f291512a582ab446daf7aa3787e68f9628d08ec0db329027d9001af83d361b481ed4b943d988cb0191c350b8efc85cfceba74afb60783488d441",
|
||||
"0x092c2ec2d967208cb5088400d826b52113d606435be011b6c9f721f293fb12242515681c9016eb1c222dcdbeeeb9fd3a504caba892f4c1832741a2b17a7305598a",
|
||||
"0x090c7fe825c29bf5df80c7101ff8a372ba4f7b2ac37c16a3bbda38cc1e38e682460499b7e5d21d3784f496e747140f465eb1a39a019d2be8baf13a5e39f359a4ed",
|
||||
"0x092bb11ebbc7cd1e565b86498aecab16842ab3fa852c7943cfbc49ee4bc593b2f308a78e1bc555e07d36d5c812af57c18f67199197a52ff74bc4e32ca6b7fadf32",
|
||||
"0x092fd1e042080801034c6d6c79d462016c74b97dfbb1272cf606e638911a08f21c02434541eeed6d66002c69042f9354211e40518316a2d98cc0da0f19fb1ea013",
|
||||
"0x09024bd491ec707bc3e8bea6b2754f37b1e85903061aefabd945537eef2f4d38b4136b925b004d29603c5e6195e073322d27f0c6ea3fa1ea5c5b248ff60dda594c",
|
||||
"0x09269e1f468bd9bbde77a13562645a80a77d26d801781ca95d385bd59ee1b0890b03694bf9043190620265bf0bc3baa4d82cc82302ae0bbf33cfa48b0ec9d5ab25",
|
||||
"0x0924d8bf62b2a725684847208dc021d5aee9f3c8f14c14786bc9f93232dfd3e068120bb7d022bbb159b4b84bb9e36cd2fcd89d761e265c1b88c8bdb9745a51cb22",
|
||||
"0x092680f932920fd86de0b417cfdbeb2836a470213097ed5abb1a2b4deba8437f6825fd0ec614b97e6cfa4d50b08ad1e0fd8a5cd72db3a468128d1045d6a54e5e6e",
|
||||
"0x0909e630914cee4db538057a0218a72288b88b2603aee0f805254b865a03de87c92ce46c1aa77ee8c42bb60c4175826f4dbb89d6282c01ff3de654c961599e66c3",
|
||||
"0x091a17302d53ad1b7a4472d111fd27b35720d49ce27259b5e42f46339dddf235e82b973c29f44cf69b589f724d7d2fa54bf38b37bde3fc66c0d965a8c10df80caa",
|
||||
"0x0916572156ae22ae2b0bc84ff41d16668be7163da26db2b13b86c218e0516c97a4131b584b7192464dde26060f66f678b03c8db8f64f1cd7a1f98a22a90cce5850",
|
||||
"0x092c6ee2ca598c123445bbbd403ca3ab8a95ce2443f941ebdcf7bb035e2a3e38e22e8d5b222a1019b126f0ecf277c7fed881413e879cd4dc5df66634b6e9fb688d",
|
||||
"0x0700000000000000000000000000000000000000000000000000000000000000002822301c27c0bd26a8f361545a09d509a2feed981accf780de30244f0300321d",
|
||||
"0x05",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
},
|
||||
{
|
||||
block: 95216,
|
||||
desc: "EOA with balance",
|
||||
account: "0x0384a6f7e2588bb251688f9ab8d10932a98e9f28",
|
||||
storage: "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
expectedRoot: "0x2dc794537b959b575dc216cd11389d802f9389fce7183278561a824aa8e950e2",
|
||||
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0x5300000000000000000000000000000000000044", ["0x0000000000000000000000000000000000000000000000000000000000000000"], "0x1111ad"]}' https://rpc.scroll.io
|
||||
block: 1154766,
|
||||
desc: "random empty storage in some contract",
|
||||
account: "0x226D078166C78e00ce5E97d8f18CDc408512bb0F",
|
||||
storage: "0x0000000000000000000000000000000000000000000000000000000000000001",
|
||||
expectedRoot: "0x1e5cf13822e052084c315e944ca84f1ef375583e85e1508055123a182e415fab",
|
||||
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
accountProof: [
|
||||
"0x001988ce414103e97cb80613adde47d5fe0611b30087d1cfcdb84284c42907467e24ac744be2edcb86cdfc42a9bbb7b2a270649161c3ce3a41d3ad5a26927d2c79",
|
||||
"0x0028db7c407cab6652f1f194401bd87bda33c9a1723b4f93515bd5929cad02668123fa5a3e69136c8e03a62c805f89c9d3578a6f5fac4bb281fc4d7df12fbcc5db",
|
||||
"0x0006801926f00b574e3a88162d192482fecba9918b77e133dd77587d9efaf5c7861712d244ac8ad4bc0bffe0dbe8ab261865c9a69b4b7769e9c188ec048460ce78",
|
||||
"0x002f3161746c2c70c7cefb74c07bc16b28bd9011343f5c6f8756471cd0b184601a25d05d5447a572452964b3c20f40ef841bf313c958e82a6923584e20496df67f",
|
||||
"0x0007602275f17f6c339ec3febc879c2ca72efa782ff1888b04553f82333eb0e60c068c8e4fe6da32f7f80a4acb50b690a7204581e5e4b8e9e7daa115dfcb466ae1",
|
||||
"0x000cb512d4ab158b5e7d5852cc7531788f11e64e5959cc1233d7a64eaaca36426116fea9120cf06c241843db50d81978b402281dfe15ba7d8a8c689bfbe0b31a1a",
|
||||
"0x002eb4fff0642f7be6d8e95793d9371d606df48efd0b62a7eb01b0a9669307be2b0ee7d01463afc3dac441f66e675ba06fec67b692e3f7a46510d096836468a3cb",
|
||||
"0x0003ea09dc5b0ca3ce2961d3200c09b837ea535447e3ba45e5583dbb4e9db48b2208abfec237c907584104b11444f55fa3fa7e6f6a5954817ecea6361516f0271b",
|
||||
"0x001c654478a700ac0414f5cd8da557e04f9570939802c3963e801523f001ebb4d916d301b50f89760520da2a662b03a207e9372902153ba84ef0f5438472f466c6",
|
||||
"0x0009f3b0d95ec5d88cfc2db19520f43d110d12c757a58ae7f578095de96e5d319d2c8f43a67b0c01008670f07eb53071b835f19cbb45d6e76281a083087217d988",
|
||||
"0x000348f024d617f64de7be803547c109b98f833b090e8a3dea0c2bed201ce752c12a4fb71f098941741c42e156651d8a42632e3acbf6f14cd9763b50216af75d61",
|
||||
"0x0029f85b49319fe7dfced69a258b1baf213d638fe3082b9a13f38e553e9d3269333054c4cb6d1e91bc2dfced1559b58cd6474ac6583a1fc5a2bef5eaa7b96ecea0",
|
||||
"0x000a4d19e2ec5f98d9ccdc1e94d9334668b87ea451195f9a8319b98cfdb077c5ce1adc64852505188363c7e98b83501e876862d8ffbd8b4051f3cb6dde7f0e8afe",
|
||||
"0x002568d5d87f19b2b3f2b7341ee61fb45f56dc76734beaa4f1a9865b80b9d9a7d500a191ba054a28841f25c34ad384817a2af2ebada6047517dbb2b6a1338e48c7",
|
||||
"0x0027f6df1a3610c7447efd280fa6a949713456a1ba79b50dc7fb87c5cb3312b19311b3c9c4420874b02bdc1ea102dc77bb803c1a5042d565aea99054ae0eb816b2",
|
||||
"0x0018a3d33e2c0d076ca4ddb093516d90cb8ba8b508e8d372d3a8a93aa9eef6079b138df6cb61c8f92dcbea8cd90ead1efa49f3a24f814c88a7bdca8fd83f4d0675",
|
||||
"0x00268f3122e558d5084a1b3ffc293b67bd2436152fbee80566226d4a753b5b44c40b6d06e2f5f17009a7e146889c2f492b077a462d602e0e72f53373a154aa450e",
|
||||
"0x0006c81bc9375fe1a0ebb75b151c8a321b85970c1a8a5aa7396a7076a4d6f26c8118a7e9e0987d7c6d0100180c9ba496db2b967f6acf7bc11d002314693416b3bf",
|
||||
"0x011fb221b659992b8d98a645cb37666f934ded70f1f5d82dad67dace71d7191f8105080000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000d8d6f2b3da41cda2e0000000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4702098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864200384a6f7e2588bb251688f9ab8d10932a98e9f28000000000000000000000000",
|
||||
"0x09062c633f6d7c7a157025fef8ab1c313a7caadda3a64b23664741f9de3b0478fe27571cf9b45d5f4deddf5f0b5354a613998fdcbe9249bb7cde92fd45513c5a99",
|
||||
"0x0920d6877efe14060018278754e91682430401880981fec1cd1b63610bed0c1e332a63aca7a8898b01983e2c53a7257310318da444fd6c8b705e488943205301a8",
|
||||
"0x090f6dadd53bbc0f5fa4fa03961aff0bf252ae335e11c1836253b6bc214d66759010b10d80991219a66f1eb7e07169b4cec4fa74b04edbdc08c3f238dfdf1d2fac",
|
||||
"0x0921ea10af71b5f3587ff9d42178a151427cbcde37b8bee6575463bf6b83110cca0520d5f97b44e7015453ec16d9c28980d2cec3df5c860eb8a455f49dcfa339be",
|
||||
"0x092d19cf96a7c129aac6f72f780703a9ef3233fc5124d592baee751a3550dd692a02c962b87efbba5aeea4856c3df29c1ea540e1fbc7a74529d5dc793fe8e490d8",
|
||||
"0x0922e20a087e600560007189ccc1a159e4fffeb1876a6de3772b7f450793a1c6620ada74791f3ecd25a650701578ef9661c64e75d836c681503e96228974a53903",
|
||||
"0x0924839671b636ebb56cb9a2860a3edf2a2875774e84dfcf8546135189f808d724260ac8be541ff088a9a1d2468c4c6e2faa793009be553a3cbca003649ee511db",
|
||||
"0x090cd8140d844f62e44ffe820c1b2b0d4aa8f0518c15ff61759d93d805cb017cb628d5b46a4c4ec0a10eb00155808890925050f7af2279b512c25005d963283262",
|
||||
"0x0913c0698673b0be011485eba05c61ac41bf14fc960ce5dbb6f5a021809eabbb0e18adaf85a3724e1a644268b845f5014b39e574928b9a01bfcd25d6fe1cf03e8f",
|
||||
"0x0912c2e7da4b091c52e0012e5c13baf07d9d9daed10a558262d2e700a7c823300e054dce1849561bbeede4368a3be06f5a2bae06bdb1bc2bcefdba84634fd1991c",
|
||||
"0x090b3e9c665497a0f9c1d3f1448c6d9144a287eb0accf86fea6f443f51986df7130392814f078a19643081787478ec3a010e2757a574877a194136c529813cf7ae",
|
||||
"0x09249a0e273abe79a0b99a55516e19213191b7f77ef34f8815edc4e1ede8711f7920615adbac1983d844c8a6ed50922562432c13d030069d8b3e92611b4fe39531",
|
||||
"0x09199575893e55d92fafb3b067130b9b6b5a46e7f6fb2d0af412d12591632dfe961adffb9dd1e7490095aac94bc1fcaeb591f4ba907fe2b882c9f6d8f7ab3a1809",
|
||||
"0x09259308e9398f029ebbe31a4b353f474622b4c96995b7365c3b13c392fcc3e7001be60286a497a3886aa9cff3ad6a5dc71504078eb7a44c43530b7b33eef4743f",
|
||||
"0x090709a21aaf18a1eaea3b925ab36f47a82095aa3e9ddbc4f01463005c4b64f6af0554d854637fcbfd9b1a4c2474de343950569e4f855d66f2ee14fcfb19ee17f5",
|
||||
"0x092d7319be75a70b8ea5f0acc6ab4a96971ec546f72b18bdc3e905ad6ea8a288f70626499aee389335559b1dd3cc8b6711f9fde0c517236190cba24fa87993877a",
|
||||
"0x09081b165a51e3081fc2e3e27d6fdb81134b65284851798de62899db3065a8c1fc040c8dce92508a510c2c34fc2949910dd41247c9f247cd216c03d9bb9d2881b4",
|
||||
"0x092a27c5be32e1ab6e85d1ac094bc1509d92285f45c63fca6dba9b14d485a94af326d44c1ff85666a4790182ddd7e51cbbe06af81d62082e6d79faec29a4501369",
|
||||
"0x091a46df6ffd6b439ffcd1b57e9548f5c4db26ade9e984efc8a91a01ab22134d3c1617b504ac2015793c5dac16d379b5ca6cb70c14243491bb68535ee686a3a553",
|
||||
"0x08180e90f9f9a4fd8065a5849539793bd9e9340b69770eff1716a733241e454c341641f913f1c32e2c652b876f902e5c2c8d51c482411ec44dae969bdc50264c42",
|
||||
"0x06273c162ecb059cd86ec0a01033dd61c39f59ee0a13eb41a28c0b2d49a45f6f94081be344adea9f54587a832b9efef6fc9ec010d86ec5fb2b53b5ff8dbabc4924",
|
||||
"0x040b792f5b15327fc37390341af919c991641846d380397e4c73cbb1298921a546050800000000000000000000000000000000000000000000000000fb0000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000be74cc05824041ef286fd08582cdfacec7784a35af72f937acf64ade5073da10889249d61c3649abf8749bf686a73f708d67726fada3e071b03d4541da9156b20226d078166c78e00ce5e97d8f18cdc408512bb0f000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
storageProof: [
|
||||
"0x02",
|
||||
"0x05",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
},
|
||||
{
|
||||
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0xC73BfBD94fb1FD860997D4E76D116BDE0333BeEf", ["0x0000000000000000000000000000000000000000000000000000000000000000"], "0x2a7531"]}' https://sepolia-rpc.scroll.io
|
||||
block: 2782513,
|
||||
desc: "contract with only one storage entry",
|
||||
account: "0xC73BfBD94fb1FD860997D4E76D116BDE0333BeEf",
|
||||
storage: "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
expectedRoot: "0x13c6008daf17807163a056504e562d4adf13870306814b1a3877cda5297d5ae9",
|
||||
expectedValue: "0x000000000000000000000000000000000000000000000000000000000000000c",
|
||||
accountProof: [
|
||||
"0x09272d92cb48d19e41ef64be1da3e10026eb87d227132becb4fba0dd1451783de425f66c55ff0bec0b012e11d64aaaa6c322566d58cf45525cb05302132518f23d",
|
||||
"0x0920908000907fe2260e41f8682510eee0572937459163ea1940c2eae8b2d5862e015e7c84f56f5948bfc9c242506d14f5c3c1b97bba1b262b40b108f5d7e69287",
|
||||
"0x09078402c38a02e2b3dda819b761ca6448029f3dd42ae7876ac0dba0d762e3ddb818d80485f0a15f54f110aad9a98b00bdf9ccb56bbcb069552c7f6c10be1b9c15",
|
||||
"0x09123243fe438606648fe3bcef5eb0165920315fb2b9316ce3ec0daac885577f190b84d9901fc150f52ed177f23ec31de4254b293c6eac2088009f3e13e3a08b78",
|
||||
"0x09053c59663d3eafad212f58c4834090db4bfd0ba2b13d3108e0acade089a5da9229a75e0b30abc41d4fb252faf9f3aa8ef750b780247d83186cdc333635c25038",
|
||||
"0x09163255ef0b1fdec7ec97c4e002cdeb6c963ca26d9d03ebdf78eb44dfdb57e4bd1fa9f68cc583c1e7019cc62133ede53e5636330de9a2c09e75f03760026e3729",
|
||||
"0x09296d3cb1c4fd539ed015f2853649d20f5db111ce13c30b7e6efa4c9468741d1e0eea62adcf73aa5bdb4868cd776df429d26787f424beeda38f4ad19aa83e43e4",
|
||||
"0x0908288df27fa423895de38ec5a52e809d99b683c5b32463501f5dad642b71387f0a3d37ae9df53b5cfdda0ac67765662e8a71a19b05d38f4a464596e129a35570",
|
||||
"0x091a774fef4e8294fcca57d213846e51bfcf71249680e937e14248e532b47abd762ad72878f07f4abbba8bd13da9b75f681f35a748bb8fc078913e16a91bce783e",
|
||||
"0x092799a146ba6b2bf4b6a24aef88c9590d9643d53f429438e348518a17af3d6e8d10e3b39898c3795c9386518438465581ca232445532fb549a8bddbdd6f4e0eed",
|
||||
"0x0914c654d53c9f8656b784709decbd12ba786800a77c929f3b4255d59138b42dff282005f8997b73d64eeb112775885c4c08d2ee4e356cc2db58154dde48a0a1e4",
|
||||
"0x091c71601a71f28ed0f6aeb59cf8a7bf29ce7dd3203352099920086e02007496260b811e85a0cd244d56f199b357d5c3a54f897fea21637698943679d07b33db8d",
|
||||
"0x092a66de31cef7b4b195772a2b96edba3ca7d97a5bbe974d071c37f0d0ca0545be0be9ca0dd4c9d62ec3ba0a404713fefe6b62391ba3f6d283a47e83fdb18c3a4e",
|
||||
"0x09093842042d196ae30784d31ed1526dd5d60cabe292eb5333e42936a2edbbaf1d237998efa424724063547c02cfa835ebfc24131315c34229b363f46fefda33ee",
|
||||
"0x0911637da97122f89f421a4564d5328893ff4b5de123aecad06e07ea45d9622b87096a296e974b5eda0f2d35cb5531c4a55f3c1e181a8bb4a0b33399e7c93853d4",
|
||||
"0x0921feeaba62a4ad78791d662737a3fa52a527dcd892f5d3af2cfbed4b6591d50f2fae639afb8ab4640a2d166616a4803442b26b9a8f5148a1c88adda1e2d911da",
|
||||
"0x090ddbe424e9368f262ef80a144383fc4f518b27200f7a61a996a075b3b84ab5041c755907f230eea27d060fa827a5743c7046cd0dc7487047bc3a7d222d65d2d7",
|
||||
"0x092d6e65349fd6751353b4b72fdd03d3ee4b1721efb434db76679c1c348b60fdc0177c7d961201138c98f85daf8a49b7a083a41e77dcd819d359b3db55c4a941a9",
|
||||
"0x090b0d48518cb602b73a86bd7b2294d401e6ad4851e3c7549fc7d23eea017eadd72e3245236b50c7f256de16bae063df6221b8331443c9d3a79e867fd77dd85cee",
|
||||
"0x07062bf32f202ec2afa65dfa84fffc76b5c05309768078544920f9b56d021606ce0b7371683425d088ad37f063ee847a9accac416314f1308cce69a8beeb2d2ab7",
|
||||
"0x090ffc989b8556e69159e246cb74cf7a2e30df63e9b7dba76ede73996ab60d9799063ca19e1d436cea189d17c5d93b8da0fa11b3ee88de1030602d1e8087cbb3da",
|
||||
"0x070000000000000000000000000000000000000000000000000000000000000000084f906a52b7da7bf35f3cc2431b40cfb90884c2ec0b579c9c096aea959509f7",
|
||||
"0x0620b6c0072d699768c0b52df46b97dae979a14788ed54dad1d7ce67db6e036a07291784b726760c2d728e4084d95df6d1534e27284c8ae2eeb56a80210f37da2b",
|
||||
"0x041245637ec55bae3c02f990e3cc3bf59cc05f515731cfa59ee55f8164953f8965050800000000000000000000000000000000000000000000000000ac000000000000000100000000000000000000000000000000000000000000000000000000000000000f68a43f5508e9c1f845406d9a507b612f97530746e59b93c8705f1a7cb0b93451e52f95aea13b1bc1f37dfbf797bfe7cea82a8c82da148f507e1ef2036fea8314b9fb07c4311e129d72b858c37b6bbe09c616f78416cb53d6e83360aff7b99c20c73bfbd94fb1fd860997d4e76d116bde0333beef000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
storageProof: [
|
||||
"0x041d3c5f8c36e5da873d45bfa1d2399a572ac77493ec089cbf88a37b9e9442842201010000000000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
],
|
||||
},
|
||||
@@ -195,13 +272,9 @@ describe("ZkTrieVerifier", async () => {
|
||||
beforeEach(async () => {
|
||||
const [deployer] = await ethers.getSigners();
|
||||
|
||||
const Poseidon2Elements = new ethers.ContractFactory(
|
||||
poseidonUnit.generateABI(2),
|
||||
poseidonUnit.createCode(2),
|
||||
deployer
|
||||
);
|
||||
const PoseidonHashWithDomainFactory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
|
||||
|
||||
const poseidon = await Poseidon2Elements.deploy();
|
||||
const poseidon = await PoseidonHashWithDomainFactory.deploy();
|
||||
await poseidon.deployed();
|
||||
|
||||
const MockZkTrieVerifier = await ethers.getContractFactory("MockZkTrieVerifier", deployer);
|
||||
@@ -209,6 +282,17 @@ describe("ZkTrieVerifier", async () => {
|
||||
await verifier.deployed();
|
||||
});
|
||||
|
||||
const shouldRevert = async (test: ITestConfig, reason: string, extra?: string) => {
|
||||
const proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
extra || "0x",
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).to.revertedWith(reason);
|
||||
};
|
||||
|
||||
for (const test of testcases) {
|
||||
it(`should succeed for block[${test.block}] desc[${test.desc}] account[${test.account}] storage[${test.storage}]`, async () => {
|
||||
const proof = concat([
|
||||
@@ -224,193 +308,277 @@ describe("ZkTrieVerifier", async () => {
|
||||
});
|
||||
}
|
||||
|
||||
it("should revert, when parent node invalid", async () => {
|
||||
it("should revert, when InvalidBranchNodeType", async () => {
|
||||
const test = testcases[0];
|
||||
test.accountProof[0] =
|
||||
"0x010a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145b";
|
||||
const proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid parent node");
|
||||
for (const i of [0, 1, test.accountProof.length - 3]) {
|
||||
const correct = test.accountProof[i];
|
||||
const prefix = correct.slice(0, 4);
|
||||
for (let b = 0; b < 16; ++b) {
|
||||
if (b >= 6 && b < 10) continue;
|
||||
test.accountProof[i] = test.accountProof[i].replace(prefix, "0x" + chars[b >> 4] + chars[b % 16]);
|
||||
await shouldRevert(test, "InvalidBranchNodeType");
|
||||
test.accountProof[i] = correct;
|
||||
}
|
||||
}
|
||||
|
||||
test.accountProof[0] =
|
||||
"0x000a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145b";
|
||||
test.storageProof[0] =
|
||||
"0x010a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145b";
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid parent node");
|
||||
for (const i of [0, 1, test.storageProof.length - 3]) {
|
||||
const correct = test.storageProof[i];
|
||||
const prefix = correct.slice(0, 4);
|
||||
for (let b = 0; b < 16; ++b) {
|
||||
if (b >= 6 && b < 10) continue;
|
||||
test.storageProof[i] = test.storageProof[i].replace(prefix, "0x" + chars[b >> 4] + chars[b % 16]);
|
||||
await shouldRevert(test, "InvalidBranchNodeType");
|
||||
test.storageProof[i] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when hash mismatch", async () => {
|
||||
it("should revert, when BranchHashMismatch", async () => {
|
||||
const test = testcases[0];
|
||||
test.accountProof[1] =
|
||||
"0x0028db7c407cab6652f1f194401bd87bda33c9a1723b4f93515bd5929cad02668123fa5a3e69136c8e03a62c805f89c9d3578a6f5fac4bb281fc4d7df12fbcc5dc";
|
||||
const proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Hash mismatch");
|
||||
for (const i of [1, 2, test.accountProof.length - 3]) {
|
||||
const correct = test.accountProof[i];
|
||||
for (const p of [40, 98]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.accountProof[i] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "BranchHashMismatch");
|
||||
test.accountProof[i] = correct;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (const i of [1, 2, test.storageProof.length - 3]) {
|
||||
const correct = test.storageProof[i];
|
||||
for (const p of [40, 98]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.storageProof[i] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "BranchHashMismatch");
|
||||
test.storageProof[i] = correct;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when invalid proof magic bytes", async () => {
|
||||
it("should revert, when InvalidAccountLeafNodeType", async () => {
|
||||
const test = testcases[0];
|
||||
test.accountProof[17] =
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704448";
|
||||
const proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid ProofMagicBytes");
|
||||
const index = test.accountProof.length - 2;
|
||||
const correct = test.accountProof[index];
|
||||
const prefix = correct.slice(0, 4);
|
||||
for (let b = 0; b < 20; ++b) {
|
||||
if (b === 4 || b === 5) continue;
|
||||
test.accountProof[index] = test.accountProof[index].replace(prefix, "0x" + chars[b >> 4] + chars[b % 16]);
|
||||
await shouldRevert(test, "InvalidAccountLeafNodeType");
|
||||
test.accountProof[index] = correct;
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when invalid leaf node in account proof", async () => {
|
||||
it("should revert, when AccountKeyMismatch", async () => {
|
||||
const test = testcases[0];
|
||||
// Invalid leaf node in account proof
|
||||
test.accountProof[16] =
|
||||
"0x000aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000004000000000000000000000000";
|
||||
let proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid leaf node");
|
||||
|
||||
// Node key mismatch in account proof
|
||||
test.accountProof[16] =
|
||||
"0x010aef16efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000004000000000000000000000000";
|
||||
proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Node key mismatch");
|
||||
|
||||
// Invalid leaf node hash in account proof
|
||||
test.accountProof[16] =
|
||||
"0x010aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071e0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000004000000000000000000000000";
|
||||
proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid leaf node hash");
|
||||
|
||||
// Invalid KeyPreimage length in account proof
|
||||
test.accountProof[16] =
|
||||
"0x010aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681215300000000000000000000000000000000000004000000000000000000000000";
|
||||
proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith(
|
||||
"Invalid KeyPreimage length"
|
||||
);
|
||||
|
||||
// Invalid KeyPreimage in account proof
|
||||
test.accountProof[16] =
|
||||
"0x010aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000003000000000000000000000000";
|
||||
proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid KeyPreimage");
|
||||
const index = test.accountProof.length - 2;
|
||||
const correct = test.accountProof[index];
|
||||
for (const p of [4, 10]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "AccountKeyMismatch");
|
||||
test.accountProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when storage root mismatch", async () => {
|
||||
it("should revert, when InvalidAccountCompressedFlag", async () => {
|
||||
const test = testcases[0];
|
||||
test.storageProof[0] =
|
||||
"0x000a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145c";
|
||||
const proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Storage root mismatch");
|
||||
const index = test.accountProof.length - 2;
|
||||
const correct = test.accountProof[index];
|
||||
for (const replaced of ["01080000", "05010000"]) {
|
||||
test.accountProof[index] = test.accountProof[index].replace("05080000", replaced);
|
||||
await shouldRevert(test, "InvalidAccountCompressedFlag");
|
||||
test.accountProof[index] = correct;
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when invalid leaf node in storage proof", async () => {
|
||||
it("should revert, when InvalidAccountLeafNodeHash", async () => {
|
||||
const test = testcases[0];
|
||||
// Invalid leaf node in account proof
|
||||
test.storageProof[15] =
|
||||
"0x0026ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a";
|
||||
let proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid leaf node");
|
||||
|
||||
// Node key mismatch in account proof
|
||||
test.storageProof[15] =
|
||||
"0x0136ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a";
|
||||
proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Node key mismatch");
|
||||
|
||||
// Invalid leaf node hash in account proof
|
||||
test.storageProof[15] =
|
||||
"0x0126ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111446048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a";
|
||||
proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid leaf node hash");
|
||||
|
||||
// Invalid KeyPreimage length in account proof
|
||||
test.storageProof[15] =
|
||||
"0x0126ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a219505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a";
|
||||
proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith(
|
||||
"Invalid KeyPreimage length"
|
||||
);
|
||||
|
||||
// Invalid KeyPreimage in account proof
|
||||
test.storageProof[15] =
|
||||
"0x0126ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b97a";
|
||||
proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid KeyPreimage");
|
||||
const index = test.accountProof.length - 2;
|
||||
const correct = test.accountProof[index];
|
||||
for (const p of [80, 112, 144, 176, 208]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "InvalidAccountLeafNodeHash");
|
||||
test.accountProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when proof length mismatch", async () => {
|
||||
it("should revert, when InvalidAccountKeyPreimageLength", async () => {
|
||||
const test = testcases[0];
|
||||
const proof = concat([
|
||||
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.accountProof,
|
||||
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
|
||||
...test.storageProof,
|
||||
"0x00",
|
||||
]);
|
||||
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Proof length mismatch");
|
||||
const index = test.accountProof.length - 2;
|
||||
const correct = test.accountProof[index];
|
||||
for (const p of [396, 397]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "InvalidAccountKeyPreimageLength");
|
||||
test.accountProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when InvalidAccountKeyPreimage", async () => {
|
||||
const test = testcases[0];
|
||||
const index = test.accountProof.length - 2;
|
||||
const correct = test.accountProof[index];
|
||||
for (const p of [398, 438]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "InvalidAccountKeyPreimage");
|
||||
test.accountProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when InvalidProofMagicBytes", async () => {
|
||||
const test = testcases[0];
|
||||
let index = test.accountProof.length - 1;
|
||||
let correct = test.accountProof[index];
|
||||
for (const p of [2, 32, 91]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "InvalidProofMagicBytes");
|
||||
test.accountProof[index] = correct;
|
||||
}
|
||||
}
|
||||
|
||||
index = test.storageProof.length - 1;
|
||||
correct = test.storageProof[index];
|
||||
for (const p of [2, 32, 91]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "InvalidProofMagicBytes");
|
||||
test.storageProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when InvalidAccountLeafNodeHash", async () => {
|
||||
const test = testcases[0];
|
||||
const correct = test.storageProof.slice();
|
||||
test.storageProof = [
|
||||
"0x05",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
|
||||
];
|
||||
await shouldRevert(test, "InvalidAccountLeafNodeHash");
|
||||
test.storageProof = correct;
|
||||
});
|
||||
|
||||
it("should revert, when InvalidStorageLeafNodeType", async () => {
|
||||
const test = testcases[0];
|
||||
const index = test.storageProof.length - 2;
|
||||
const correct = test.storageProof[index];
|
||||
const prefix = correct.slice(0, 4);
|
||||
for (let b = 0; b < 20; ++b) {
|
||||
if (b === 4 || b === 5) continue;
|
||||
test.storageProof[index] = test.storageProof[index].replace(prefix, "0x" + chars[b >> 4] + chars[b % 16]);
|
||||
await shouldRevert(test, "InvalidStorageLeafNodeType");
|
||||
test.storageProof[index] = correct;
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when StorageKeyMismatch", async () => {
|
||||
const test = testcases[0];
|
||||
const index = test.storageProof.length - 2;
|
||||
const correct = test.storageProof[index];
|
||||
for (const p of [4, 10]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "StorageKeyMismatch");
|
||||
test.storageProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when InvalidStorageCompressedFlag", async () => {
|
||||
const test = testcases[0];
|
||||
const index = test.storageProof.length - 2;
|
||||
const correct = test.storageProof[index];
|
||||
for (const replaced of ["00010000", "01000000"]) {
|
||||
test.storageProof[index] = test.storageProof[index].replace("01010000", replaced);
|
||||
await shouldRevert(test, "InvalidStorageCompressedFlag");
|
||||
test.storageProof[index] = correct;
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when InvalidStorageLeafNodeHash", async () => {
|
||||
const test = testcases[0];
|
||||
const index = test.storageProof.length - 2;
|
||||
const correct = test.storageProof[index];
|
||||
for (const p of [100, 132]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "InvalidStorageLeafNodeHash");
|
||||
test.storageProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when InvalidStorageKeyPreimageLength", async () => {
|
||||
const test = testcases[0];
|
||||
const index = test.storageProof.length - 2;
|
||||
const correct = test.storageProof[index];
|
||||
for (const p of [140, 141]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "InvalidStorageKeyPreimageLength");
|
||||
test.storageProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when InvalidStorageKeyPreimage", async () => {
|
||||
const test = testcases[0];
|
||||
const index = test.storageProof.length - 2;
|
||||
const correct = test.storageProof[index];
|
||||
for (const p of [142, 205]) {
|
||||
const v = correct[p];
|
||||
for (let b = 0; b < 3; ++b) {
|
||||
if (v === chars[b]) continue;
|
||||
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
|
||||
await shouldRevert(test, "InvalidStorageKeyPreimage");
|
||||
test.storageProof[index] = correct;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should revert, when InvalidStorageEmptyLeafNodeHash", async () => {
|
||||
const test = testcases[0];
|
||||
const index = test.storageProof.length - 2;
|
||||
const correct = test.storageProof[index];
|
||||
test.storageProof[index] = "0x05";
|
||||
await shouldRevert(test, "InvalidStorageEmptyLeafNodeHash");
|
||||
test.storageProof[index] = correct;
|
||||
});
|
||||
|
||||
it("should revert, when ProofLengthMismatch", async () => {
|
||||
const test = testcases[0];
|
||||
await shouldRevert(test, "ProofLengthMismatch", "0x0000");
|
||||
});
|
||||
});
|
||||
|
||||
1154
contracts/integration-test/testdata/poseidon_hash_with_domain.data
vendored
Normal file
1154
contracts/integration-test/testdata/poseidon_hash_with_domain.data
vendored
Normal file
File diff suppressed because it is too large
Load Diff
@@ -2,7 +2,7 @@
|
||||
import * as dotenv from "dotenv";
|
||||
|
||||
import { ethers } from "hardhat";
|
||||
import poseidonUnit from "circomlib/src/poseidon_gencontract";
|
||||
import { generateABI, createCode } from "../scripts/poseidon";
|
||||
|
||||
dotenv.config();
|
||||
|
||||
@@ -15,11 +15,7 @@ async function main() {
|
||||
let PoseidonUnit2Address = process.env.POSEIDON_UNIT2_ADDR;
|
||||
|
||||
if (!PoseidonUnit2Address) {
|
||||
const Poseidon2Elements = new ethers.ContractFactory(
|
||||
poseidonUnit.generateABI(2),
|
||||
poseidonUnit.createCode(2),
|
||||
deployer
|
||||
);
|
||||
const Poseidon2Elements = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
|
||||
|
||||
const poseidon = await Poseidon2Elements.deploy();
|
||||
console.log("Deploy PoseidonUnit2 contract, hash:", poseidon.deployTransaction.hash);
|
||||
@@ -28,7 +24,9 @@ async function main() {
|
||||
PoseidonUnit2Address = poseidon.address;
|
||||
}
|
||||
|
||||
const verifier = await ScrollChainCommitmentVerifier.deploy(PoseidonUnit2Address, L1ScrollChainAddress);
|
||||
const verifier = await ScrollChainCommitmentVerifier.deploy(PoseidonUnit2Address, L1ScrollChainAddress, {
|
||||
gasPrice: 1e9,
|
||||
});
|
||||
console.log("Deploy ScrollChainCommitmentVerifier contract, hash:", verifier.deployTransaction.hash);
|
||||
const receipt = await verifier.deployTransaction.wait();
|
||||
console.log(`✅ Deploy ScrollChainCommitmentVerifier contract at: ${verifier.address}, gas used: ${receipt.gasUsed}`);
|
||||
|
||||
@@ -31,6 +31,8 @@ import {ScrollStandardERC20Factory} from "../../src/libraries/token/ScrollStanda
|
||||
contract DeployL2BridgeContracts is Script {
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
|
||||
address L2_PROXY_ADMIN_ADDR = vm.envAddress("L2_PROXY_ADMIN_ADDR");
|
||||
|
||||
address L1_TX_FEE_RECIPIENT_ADDR = vm.envAddress("L1_TX_FEE_RECIPIENT_ADDR");
|
||||
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
|
||||
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
|
||||
@@ -58,6 +60,8 @@ contract DeployL2BridgeContracts is Script {
|
||||
address L2_WHITELIST_PREDEPLOY_ADDR = vm.envOr("L2_WHITELIST_PREDEPLOY_ADDR", address(0));
|
||||
|
||||
function run() external {
|
||||
proxyAdmin = ProxyAdmin(L2_PROXY_ADMIN_ADDR);
|
||||
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
// predeploys
|
||||
@@ -67,7 +71,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
deployL2Whitelist();
|
||||
|
||||
// upgradable
|
||||
deployProxyAdmin();
|
||||
deployL2ScrollMessenger();
|
||||
deployL2GatewayRouter();
|
||||
deployScrollStandardERC20Factory();
|
||||
@@ -131,12 +134,6 @@ contract DeployL2BridgeContracts is Script {
|
||||
logAddress("L2_WHITELIST_ADDR", address(whitelist));
|
||||
}
|
||||
|
||||
function deployProxyAdmin() internal {
|
||||
proxyAdmin = new ProxyAdmin();
|
||||
|
||||
logAddress("L2_PROXY_ADMIN_ADDR", address(proxyAdmin));
|
||||
}
|
||||
|
||||
function deployL2ScrollMessenger() internal {
|
||||
L2ScrollMessenger impl = new L2ScrollMessenger(L1_SCROLL_MESSENGER_PROXY_ADDR, address(queue));
|
||||
|
||||
|
||||
63
contracts/scripts/foundry/DeployLidoGateway.s.sol
Normal file
63
contracts/scripts/foundry/DeployLidoGateway.s.sol
Normal file
@@ -0,0 +1,63 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.10;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
import {console} from "forge-std/console.sol";
|
||||
|
||||
import {L1LidoGateway} from "../../src/lido/L1LidoGateway.sol";
|
||||
import {L2LidoGateway} from "../../src/lido/L2LidoGateway.sol";
|
||||
|
||||
// solhint-disable state-visibility
|
||||
// solhint-disable var-name-mixedcase
|
||||
|
||||
contract DeployLidoGateway is Script {
|
||||
string NETWORK = vm.envString("NETWORK");
|
||||
|
||||
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
|
||||
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
|
||||
address L1_WSTETH_ADDR = vm.envAddress("L1_WSTETH_ADDR");
|
||||
|
||||
address L2_WSTETH_ADDR = vm.envAddress("L2_WSTETH_ADDR");
|
||||
|
||||
address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
|
||||
address L1_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L1_GATEWAY_ROUTER_PROXY_ADDR");
|
||||
address L1_LIDO_GATEWAY_PROXY_ADDR = vm.envAddress("L1_LIDO_GATEWAY_PROXY_ADDR");
|
||||
|
||||
address L2_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_PROXY_ADDR");
|
||||
address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
|
||||
address L2_LIDO_GATEWAY_PROXY_ADDR = vm.envAddress("L2_LIDO_GATEWAY_PROXY_ADDR");
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("L1"))) {
|
||||
// deploy l1 lido gateway
|
||||
L1LidoGateway gateway = new L1LidoGateway(
|
||||
L1_WSTETH_ADDR,
|
||||
L2_WSTETH_ADDR,
|
||||
L2_LIDO_GATEWAY_PROXY_ADDR,
|
||||
L1_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
logAddress("L1_LIDO_GATEWAY_IMPLEMENTATION_ADDR", address(gateway));
|
||||
} else if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("L2"))) {
|
||||
// deploy l2 lido gateway
|
||||
L2LidoGateway gateway = new L2LidoGateway(
|
||||
L1_WSTETH_ADDR,
|
||||
L2_WSTETH_ADDR,
|
||||
L1_LIDO_GATEWAY_PROXY_ADDR,
|
||||
L2_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
logAddress("L2_LIDO_GATEWAY_IMPLEMENTATION_ADDR", address(gateway));
|
||||
}
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal view {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
}
|
||||
@@ -78,15 +78,21 @@ contract InitializeL1BridgeContracts is Script {
|
||||
|
||||
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
// note: we use call upgrade(...) and initialize(...) instead of upgradeAndCall(...),
|
||||
// otherwise the contract owner would become ProxyAdmin.
|
||||
|
||||
// initialize ScrollChain
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L1_SCROLL_CHAIN_PROXY_ADDR),
|
||||
L1_SCROLL_CHAIN_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
ScrollChain.initialize,
|
||||
(L1_MESSAGE_QUEUE_PROXY_ADDR, L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR, MAX_TX_IN_CHUNK)
|
||||
)
|
||||
L1_SCROLL_CHAIN_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).initialize(
|
||||
L1_MESSAGE_QUEUE_PROXY_ADDR,
|
||||
L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR,
|
||||
MAX_TX_IN_CHUNK
|
||||
);
|
||||
|
||||
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addSequencer(L1_COMMIT_SENDER_ADDRESS);
|
||||
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addProver(L1_FINALIZE_SENDER_ADDRESS);
|
||||
|
||||
@@ -103,35 +109,32 @@ contract InitializeL1BridgeContracts is Script {
|
||||
L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).updateWhitelist(L1_WHITELIST_ADDR);
|
||||
|
||||
// initialize L1MessageQueueWithGasPriceOracle
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L1_MESSAGE_QUEUE_PROXY_ADDR),
|
||||
L1_MESSAGE_QUEUE_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L1MessageQueue.initialize,
|
||||
(
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
L1_SCROLL_CHAIN_PROXY_ADDR,
|
||||
L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
|
||||
L2_GAS_PRICE_ORACLE_PROXY_ADDR,
|
||||
MAX_L1_MESSAGE_GAS_LIMIT
|
||||
)
|
||||
)
|
||||
L1_MESSAGE_QUEUE_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L1MessageQueueWithGasPriceOracle(L1_MESSAGE_QUEUE_PROXY_ADDR).initialize(
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
L1_SCROLL_CHAIN_PROXY_ADDR,
|
||||
L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
|
||||
L2_GAS_PRICE_ORACLE_PROXY_ADDR,
|
||||
MAX_L1_MESSAGE_GAS_LIMIT
|
||||
);
|
||||
|
||||
L1MessageQueueWithGasPriceOracle(L1_MESSAGE_QUEUE_PROXY_ADDR).initializeV2();
|
||||
|
||||
// initialize L1ScrollMessenger
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L1_SCROLL_MESSENGER_PROXY_ADDR),
|
||||
L1_SCROLL_MESSENGER_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L1ScrollMessenger.initialize,
|
||||
(
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
L1_FEE_VAULT_ADDR,
|
||||
L1_SCROLL_CHAIN_PROXY_ADDR,
|
||||
L1_MESSAGE_QUEUE_PROXY_ADDR
|
||||
)
|
||||
)
|
||||
L1_SCROLL_MESSENGER_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L1ScrollMessenger(payable(L1_SCROLL_MESSENGER_PROXY_ADDR)).initialize(
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
L1_FEE_VAULT_ADDR,
|
||||
L1_SCROLL_CHAIN_PROXY_ADDR,
|
||||
L1_MESSAGE_QUEUE_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize EnforcedTxGateway
|
||||
@@ -147,63 +150,72 @@ contract InitializeL1BridgeContracts is Script {
|
||||
);
|
||||
|
||||
// initialize L1CustomERC20Gateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR),
|
||||
L1_CUSTOM_ERC20_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L1CustomERC20Gateway.initialize,
|
||||
(L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR, L1_GATEWAY_ROUTER_PROXY_ADDR, L1_SCROLL_MESSENGER_PROXY_ADDR)
|
||||
)
|
||||
L1_CUSTOM_ERC20_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L1CustomERC20Gateway(L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR).initialize(
|
||||
L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR,
|
||||
L1_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize L1ERC1155Gateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L1_ERC1155_GATEWAY_PROXY_ADDR),
|
||||
L1_ERC1155_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(L1ERC1155Gateway.initialize, (L2_ERC1155_GATEWAY_PROXY_ADDR, L1_SCROLL_MESSENGER_PROXY_ADDR))
|
||||
L1_ERC1155_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L1ERC1155Gateway(L1_ERC1155_GATEWAY_PROXY_ADDR).initialize(
|
||||
L2_ERC1155_GATEWAY_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize L1ERC721Gateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L1_ERC721_GATEWAY_PROXY_ADDR),
|
||||
L1_ERC721_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(L1ERC721Gateway.initialize, (L2_ERC721_GATEWAY_PROXY_ADDR, L1_SCROLL_MESSENGER_PROXY_ADDR))
|
||||
L1_ERC721_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L1ERC721Gateway(L1_ERC721_GATEWAY_PROXY_ADDR).initialize(
|
||||
L2_ERC721_GATEWAY_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize L1ETHGateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
ITransparentUpgradeableProxy(L1_ETH_GATEWAY_PROXY_ADDR),
|
||||
L1_ETH_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L1ETHGateway.initialize,
|
||||
(L2_ETH_GATEWAY_PROXY_ADDR, L1_GATEWAY_ROUTER_PROXY_ADDR, L1_SCROLL_MESSENGER_PROXY_ADDR)
|
||||
)
|
||||
proxyAdmin.upgrade(ITransparentUpgradeableProxy(L1_ETH_GATEWAY_PROXY_ADDR), L1_ETH_GATEWAY_IMPLEMENTATION_ADDR);
|
||||
|
||||
L1ETHGateway(L1_ETH_GATEWAY_PROXY_ADDR).initialize(
|
||||
L2_ETH_GATEWAY_PROXY_ADDR,
|
||||
L1_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize L1StandardERC20Gateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR),
|
||||
L1_STANDARD_ERC20_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L1StandardERC20Gateway.initialize,
|
||||
(
|
||||
L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR,
|
||||
L1_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
L2_SCROLL_STANDARD_ERC20_ADDR,
|
||||
L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR
|
||||
)
|
||||
)
|
||||
L1_STANDARD_ERC20_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L1StandardERC20Gateway(L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR).initialize(
|
||||
L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR,
|
||||
L1_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
L2_SCROLL_STANDARD_ERC20_ADDR,
|
||||
L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR
|
||||
);
|
||||
|
||||
// initialize L1WETHGateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L1_WETH_GATEWAY_PROXY_ADDR),
|
||||
L1_WETH_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L1WETHGateway.initialize,
|
||||
(L2_WETH_GATEWAY_PROXY_ADDR, L1_GATEWAY_ROUTER_PROXY_ADDR, L1_SCROLL_MESSENGER_PROXY_ADDR)
|
||||
)
|
||||
L1_WETH_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L1WETHGateway(payable(L1_WETH_GATEWAY_PROXY_ADDR)).initialize(
|
||||
L2_WETH_GATEWAY_PROXY_ADDR,
|
||||
L1_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L1_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// set WETH gateway in router
|
||||
|
||||
@@ -67,6 +67,9 @@ contract InitializeL2BridgeContracts is Script {
|
||||
|
||||
vm.startBroadcast(deployerPrivateKey);
|
||||
|
||||
// note: we use call upgrade(...) and initialize(...) instead of upgradeAndCall(...),
|
||||
// otherwise the contract owner would become ProxyAdmin.
|
||||
|
||||
// initialize L2MessageQueue
|
||||
L2MessageQueue(L2_MESSAGE_QUEUE_ADDR).initialize(L2_SCROLL_MESSENGER_PROXY_ADDR);
|
||||
|
||||
@@ -77,12 +80,13 @@ contract InitializeL2BridgeContracts is Script {
|
||||
L1GasPriceOracle(L1_GAS_PRICE_ORACLE_ADDR).updateWhitelist(L2_WHITELIST_ADDR);
|
||||
|
||||
// initialize L2ScrollMessenger
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L2_SCROLL_MESSENGER_PROXY_ADDR),
|
||||
L2_SCROLL_MESSENGER_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(L2ScrollMessenger.initialize, (L1_SCROLL_MESSENGER_PROXY_ADDR))
|
||||
L2_SCROLL_MESSENGER_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L2ScrollMessenger(payable(L2_SCROLL_MESSENGER_PROXY_ADDR)).initialize(L1_SCROLL_MESSENGER_PROXY_ADDR);
|
||||
|
||||
// initialize L2GatewayRouter
|
||||
L2GatewayRouter(L2_GATEWAY_ROUTER_PROXY_ADDR).initialize(
|
||||
L2_ETH_GATEWAY_PROXY_ADDR,
|
||||
@@ -90,62 +94,71 @@ contract InitializeL2BridgeContracts is Script {
|
||||
);
|
||||
|
||||
// initialize L2CustomERC20Gateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR),
|
||||
L2_CUSTOM_ERC20_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L2CustomERC20Gateway.initialize,
|
||||
(L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR, L2_GATEWAY_ROUTER_PROXY_ADDR, L2_SCROLL_MESSENGER_PROXY_ADDR)
|
||||
)
|
||||
L2_CUSTOM_ERC20_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L2CustomERC20Gateway(L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR).initialize(
|
||||
L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR,
|
||||
L2_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize L2ERC1155Gateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L2_ERC1155_GATEWAY_PROXY_ADDR),
|
||||
L2_ERC1155_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(L2ERC1155Gateway.initialize, (L1_ERC1155_GATEWAY_PROXY_ADDR, L2_SCROLL_MESSENGER_PROXY_ADDR))
|
||||
L2_ERC1155_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L2ERC1155Gateway(L2_ERC1155_GATEWAY_PROXY_ADDR).initialize(
|
||||
L1_ERC1155_GATEWAY_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize L2ERC721Gateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L2_ERC721_GATEWAY_PROXY_ADDR),
|
||||
L2_ERC721_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(L2ERC721Gateway.initialize, (L1_ERC721_GATEWAY_PROXY_ADDR, L2_SCROLL_MESSENGER_PROXY_ADDR))
|
||||
L2_ERC721_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L2ERC721Gateway(L2_ERC721_GATEWAY_PROXY_ADDR).initialize(
|
||||
L1_ERC721_GATEWAY_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize L2ETHGateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
ITransparentUpgradeableProxy(L2_ETH_GATEWAY_PROXY_ADDR),
|
||||
L2_ETH_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L2ETHGateway.initialize,
|
||||
(L1_ETH_GATEWAY_PROXY_ADDR, L2_GATEWAY_ROUTER_PROXY_ADDR, L2_SCROLL_MESSENGER_PROXY_ADDR)
|
||||
)
|
||||
proxyAdmin.upgrade(ITransparentUpgradeableProxy(L2_ETH_GATEWAY_PROXY_ADDR), L2_ETH_GATEWAY_IMPLEMENTATION_ADDR);
|
||||
|
||||
L2ETHGateway(L2_ETH_GATEWAY_PROXY_ADDR).initialize(
|
||||
L1_ETH_GATEWAY_PROXY_ADDR,
|
||||
L2_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// initialize L2StandardERC20Gateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR),
|
||||
L2_STANDARD_ERC20_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L2StandardERC20Gateway.initialize,
|
||||
(
|
||||
L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR,
|
||||
L2_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR
|
||||
)
|
||||
)
|
||||
L2_STANDARD_ERC20_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L2StandardERC20Gateway(L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR).initialize(
|
||||
L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR,
|
||||
L2_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR
|
||||
);
|
||||
|
||||
// initialize L2WETHGateway
|
||||
proxyAdmin.upgradeAndCall(
|
||||
proxyAdmin.upgrade(
|
||||
ITransparentUpgradeableProxy(L2_WETH_GATEWAY_PROXY_ADDR),
|
||||
L2_WETH_GATEWAY_IMPLEMENTATION_ADDR,
|
||||
abi.encodeCall(
|
||||
L2WETHGateway.initialize,
|
||||
(L1_WETH_GATEWAY_PROXY_ADDR, L2_GATEWAY_ROUTER_PROXY_ADDR, L2_SCROLL_MESSENGER_PROXY_ADDR)
|
||||
)
|
||||
L2_WETH_GATEWAY_IMPLEMENTATION_ADDR
|
||||
);
|
||||
|
||||
L2WETHGateway(payable(L2_WETH_GATEWAY_PROXY_ADDR)).initialize(
|
||||
L1_WETH_GATEWAY_PROXY_ADDR,
|
||||
L2_GATEWAY_ROUTER_PROXY_ADDR,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR
|
||||
);
|
||||
|
||||
// set WETH gateway in router
|
||||
|
||||
202
contracts/scripts/poseidon.ts
Normal file
202
contracts/scripts/poseidon.ts
Normal file
@@ -0,0 +1,202 @@
|
||||
/* eslint-disable node/no-missing-import */
|
||||
import { ethers } from "ethers";
|
||||
|
||||
import Contract from "circomlib/src/evmasm";
|
||||
import * as constants from "circomlib/src/poseidon_constants";
|
||||
|
||||
const N_ROUNDS_F = 8;
|
||||
const N_ROUNDS_P = [56, 57, 56, 60, 60, 63, 64, 63];
|
||||
|
||||
export function createCode(nInputs: number) {
|
||||
if (nInputs < 1 || nInputs > 8) throw new Error("Invalid number of inputs. Must be 1<=nInputs<=8");
|
||||
const t = nInputs + 1;
|
||||
const nRoundsF = N_ROUNDS_F;
|
||||
const nRoundsP = N_ROUNDS_P[t - 2];
|
||||
|
||||
const C = new Contract();
|
||||
|
||||
function saveM() {
|
||||
for (let i = 0; i < t; i++) {
|
||||
for (let j = 0; j < t; j++) {
|
||||
C.push(constants.M[t - 2][i][j]);
|
||||
C.push((1 + i * t + j) * 32);
|
||||
C.mstore();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function ark(r: number) {
|
||||
// st, q
|
||||
for (let i = 0; i < t; i++) {
|
||||
C.dup(t); // q, st, q
|
||||
C.push(constants.C[t - 2][r * t + i]); // K, q, st, q
|
||||
C.dup(2 + i); // st[i], K, q, st, q
|
||||
C.addmod(); // newSt[i], st, q
|
||||
C.swap(1 + i); // xx, st, q
|
||||
C.pop();
|
||||
}
|
||||
}
|
||||
|
||||
function sigma(p: number) {
|
||||
// sq, q
|
||||
C.dup(t); // q, st, q
|
||||
C.dup(1 + p); // st[p] , q , st, q
|
||||
C.dup(1); // q, st[p] , q , st, q
|
||||
C.dup(0); // q, q, st[p] , q , st, q
|
||||
C.dup(2); // st[p] , q, q, st[p] , q , st, q
|
||||
C.dup(0); // st[p] , st[p] , q, q, st[p] , q , st, q
|
||||
C.mulmod(); // st2[p], q, st[p] , q , st, q
|
||||
C.dup(0); // st2[p], st2[p], q, st[p] , q , st, q
|
||||
C.mulmod(); // st4[p], st[p] , q , st, q
|
||||
C.mulmod(); // st5[p], st, q
|
||||
C.swap(1 + p);
|
||||
C.pop(); // newst, q
|
||||
}
|
||||
|
||||
function mix() {
|
||||
C.label("mix");
|
||||
for (let i = 0; i < t; i++) {
|
||||
for (let j = 0; j < t; j++) {
|
||||
if (j === 0) {
|
||||
C.dup(i + t); // q, newSt, oldSt, q
|
||||
C.push((1 + i * t + j) * 32);
|
||||
C.mload(); // M, q, newSt, oldSt, q
|
||||
C.dup(2 + i + j); // oldSt[j], M, q, newSt, oldSt, q
|
||||
C.mulmod(); // acc, newSt, oldSt, q
|
||||
} else {
|
||||
C.dup(1 + i + t); // q, acc, newSt, oldSt, q
|
||||
C.push((1 + i * t + j) * 32);
|
||||
C.mload(); // M, q, acc, newSt, oldSt, q
|
||||
C.dup(3 + i + j); // oldSt[j], M, q, acc, newSt, oldSt, q
|
||||
C.mulmod(); // aux, acc, newSt, oldSt, q
|
||||
C.dup(2 + i + t); // q, aux, acc, newSt, oldSt, q
|
||||
C.swap(2); // acc, aux, q, newSt, oldSt, q
|
||||
C.addmod(); // acc, newSt, oldSt, q
|
||||
}
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < t; i++) {
|
||||
C.swap(t - i + (t - i - 1));
|
||||
C.pop();
|
||||
}
|
||||
C.push(0);
|
||||
C.mload();
|
||||
C.jmp();
|
||||
}
|
||||
|
||||
// Check selector
|
||||
C.push("0x0100000000000000000000000000000000000000000000000000000000");
|
||||
C.push(0);
|
||||
C.calldataload();
|
||||
C.div();
|
||||
C.dup(0);
|
||||
C.push(ethers.utils.keccak256(ethers.utils.toUtf8Bytes(`poseidon(uint256[${nInputs}],uint256)`)).slice(0, 10)); // poseidon(uint256[n],uint256)
|
||||
C.eq();
|
||||
C.swap(1);
|
||||
C.push(ethers.utils.keccak256(ethers.utils.toUtf8Bytes(`poseidon(bytes32[${nInputs}],bytes32)`)).slice(0, 10)); // poseidon(bytes32[n],bytes32)
|
||||
C.eq();
|
||||
C.or();
|
||||
C.jmpi("start");
|
||||
C.invalid();
|
||||
|
||||
C.label("start");
|
||||
|
||||
saveM();
|
||||
|
||||
C.push("0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"); // q
|
||||
|
||||
// Load t values from the call data.
|
||||
// The function has a single array param param
|
||||
// [Selector (4)] [item1 (32)] [item2 (32)] .... [doman (32)]
|
||||
// Stack positions 0-nInputs.
|
||||
for (let i = 0; i < nInputs; i++) {
|
||||
C.push(0x04 + 0x20 * (nInputs - i - 1));
|
||||
C.calldataload();
|
||||
}
|
||||
C.push(0x04 + 0x20 * nInputs);
|
||||
C.calldataload();
|
||||
|
||||
for (let i = 0; i < nRoundsF + nRoundsP; i++) {
|
||||
ark(i);
|
||||
if (i < nRoundsF / 2 || i >= nRoundsP + nRoundsF / 2) {
|
||||
for (let j = 0; j < t; j++) {
|
||||
sigma(j);
|
||||
}
|
||||
} else {
|
||||
sigma(0);
|
||||
}
|
||||
const strLabel = "aferMix" + i;
|
||||
C._pushLabel(strLabel);
|
||||
C.push(0);
|
||||
C.mstore();
|
||||
C.jmp("mix");
|
||||
C.label(strLabel);
|
||||
}
|
||||
|
||||
C.push("0x00");
|
||||
C.mstore(); // Save it to pos 0;
|
||||
C.push("0x20");
|
||||
C.push("0x00");
|
||||
C.return();
|
||||
|
||||
mix();
|
||||
|
||||
return C.createTxData();
|
||||
}
|
||||
|
||||
export function generateABI(nInputs: number) {
|
||||
return [
|
||||
{
|
||||
constant: true,
|
||||
inputs: [
|
||||
{
|
||||
internalType: `bytes32[${nInputs}]`,
|
||||
name: "input",
|
||||
type: `bytes32[${nInputs}]`,
|
||||
},
|
||||
{
|
||||
internalType: "bytes32",
|
||||
name: "domain",
|
||||
type: "bytes32",
|
||||
},
|
||||
],
|
||||
name: "poseidon",
|
||||
outputs: [
|
||||
{
|
||||
internalType: "bytes32",
|
||||
name: "",
|
||||
type: "bytes32",
|
||||
},
|
||||
],
|
||||
payable: false,
|
||||
stateMutability: "pure",
|
||||
type: "function",
|
||||
},
|
||||
{
|
||||
constant: true,
|
||||
inputs: [
|
||||
{
|
||||
internalType: `uint256[${nInputs}]`,
|
||||
name: "input",
|
||||
type: `uint256[${nInputs}]`,
|
||||
},
|
||||
{
|
||||
internalType: "uint256",
|
||||
name: "domain",
|
||||
type: "uint256",
|
||||
},
|
||||
],
|
||||
name: "poseidon",
|
||||
outputs: [
|
||||
{
|
||||
internalType: "uint256",
|
||||
name: "",
|
||||
type: "uint256",
|
||||
},
|
||||
],
|
||||
payable: false,
|
||||
stateMutability: "pure",
|
||||
type: "function",
|
||||
},
|
||||
];
|
||||
}
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {ScrollChain} from "./ScrollChain.sol";
|
||||
import {IScrollChain} from "./IScrollChain.sol";
|
||||
import {ZkTrieVerifier} from "../../libraries/verifier/ZkTrieVerifier.sol";
|
||||
|
||||
contract ScrollChainCommitmentVerifier {
|
||||
@@ -49,11 +49,11 @@ contract ScrollChainCommitmentVerifier {
|
||||
bytes32 storageKey,
|
||||
bytes calldata proof
|
||||
) external view returns (bytes32 storageValue) {
|
||||
require(ScrollChain(rollup).isBatchFinalized(batchIndex), "Batch not finalized");
|
||||
require(IScrollChain(rollup).isBatchFinalized(batchIndex), "Batch not finalized");
|
||||
|
||||
bytes32 computedStateRoot;
|
||||
(computedStateRoot, storageValue) = ZkTrieVerifier.verifyZkTrieProof(poseidon, account, storageKey, proof);
|
||||
bytes32 expectedStateRoot = ScrollChain(rollup).finalizedStateRoots(batchIndex);
|
||||
bytes32 expectedStateRoot = IScrollChain(rollup).finalizedStateRoots(batchIndex);
|
||||
require(computedStateRoot == expectedStateRoot, "Invalid inclusion proof");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,9 +2,7 @@
|
||||
|
||||
pragma solidity ^0.8.16;
|
||||
|
||||
interface PoseidonUnit2 {
|
||||
function poseidon(uint256[2] memory) external view returns (uint256);
|
||||
}
|
||||
// solhint-disable no-inline-assembly
|
||||
|
||||
library ZkTrieVerifier {
|
||||
/// @notice Internal function to validates a proof from eth_getProof.
|
||||
@@ -58,19 +56,20 @@ library ZkTrieVerifier {
|
||||
}
|
||||
}
|
||||
// compute poseidon hash of two uint256
|
||||
function poseidon_hash(hasher, v0, v1) -> r {
|
||||
function poseidon_hash(hasher, v0, v1, domain) -> r {
|
||||
let x := mload(0x40)
|
||||
// keccack256("poseidon(uint256[2])")
|
||||
mstore(x, 0x29a5f2f600000000000000000000000000000000000000000000000000000000)
|
||||
// keccack256("poseidon(uint256[2],uint256)")
|
||||
mstore(x, 0xa717016c00000000000000000000000000000000000000000000000000000000)
|
||||
mstore(add(x, 0x04), v0)
|
||||
mstore(add(x, 0x24), v1)
|
||||
let success := staticcall(gas(), hasher, x, 0x44, 0x20, 0x20)
|
||||
mstore(add(x, 0x44), domain)
|
||||
let success := staticcall(gas(), hasher, x, 0x64, 0x20, 0x20)
|
||||
require(success, "poseidon hash failed")
|
||||
r := mload(0x20)
|
||||
}
|
||||
// compute poseidon hash of 1 uint256
|
||||
function hash_uint256(hasher, v) -> r {
|
||||
r := poseidon_hash(hasher, shr(128, v), and(v, 0xffffffffffffffffffffffffffffffff))
|
||||
r := poseidon_hash(hasher, shr(128, v), and(v, 0xffffffffffffffffffffffffffffffff), 512)
|
||||
}
|
||||
|
||||
// traverses the tree from the root to the node before the leaf.
|
||||
@@ -90,15 +89,16 @@ library ZkTrieVerifier {
|
||||
} {
|
||||
// must be a parent node with two children
|
||||
let nodeType := byte(0, calldataload(ptr))
|
||||
// 6 <= nodeType && nodeType < 10
|
||||
require(lt(sub(nodeType, 6), 4), "InvalidBranchNodeType")
|
||||
ptr := add(ptr, 1)
|
||||
require(eq(nodeType, 0), "Invalid parent node")
|
||||
|
||||
// load left/right child hash
|
||||
let childHashL := calldataload(ptr)
|
||||
ptr := add(ptr, 0x20)
|
||||
let childHashR := calldataload(ptr)
|
||||
ptr := add(ptr, 0x20)
|
||||
let hash := poseidon_hash(hasher, childHashL, childHashR)
|
||||
let hash := poseidon_hash(hasher, childHashL, childHashR, nodeType)
|
||||
|
||||
// first item is considered the root node.
|
||||
// Otherwise verifies that the hash of the current node
|
||||
@@ -108,7 +108,7 @@ library ZkTrieVerifier {
|
||||
rootHash := hash
|
||||
}
|
||||
default {
|
||||
require(eq(hash, expectedHash), "Hash mismatch")
|
||||
require(eq(hash, expectedHash), "BranchHashMismatch")
|
||||
}
|
||||
|
||||
// decide which path to walk based on key
|
||||
@@ -130,129 +130,132 @@ library ZkTrieVerifier {
|
||||
x := keccak256(x, 0x2d)
|
||||
require(
|
||||
eq(x, 0x950654da67865a81bc70e45f3230f5179f08e29c66184bf746f71050f117b3b8),
|
||||
"Invalid ProofMagicBytes"
|
||||
"InvalidProofMagicBytes"
|
||||
)
|
||||
ptr := add(ptr, 0x2d) // skip ProofMagicBytes
|
||||
}
|
||||
|
||||
// shared variable names
|
||||
let storageHash
|
||||
// starting point
|
||||
let ptr := proof.offset
|
||||
function verifyAccountProof(hasher, _account, _ptr) -> ptr, storageRootHash, _stateRoot {
|
||||
ptr := _ptr
|
||||
|
||||
// verify account proof
|
||||
{
|
||||
let leafHash
|
||||
let key := hash_uint256(poseidon, shl(96, account))
|
||||
let key := hash_uint256(hasher, shl(96, _account))
|
||||
|
||||
// `stateRoot` is a return value and must be checked by the caller
|
||||
ptr, stateRoot, leafHash := walkTree(poseidon, key, ptr)
|
||||
ptr, _stateRoot, leafHash := walkTree(hasher, key, ptr)
|
||||
|
||||
require(eq(1, byte(0, calldataload(ptr))), "Invalid leaf node")
|
||||
ptr := add(ptr, 0x01) // skip NodeType
|
||||
require(eq(calldataload(ptr), key), "Node key mismatch")
|
||||
ptr := add(ptr, 0x20) // skip NodeKey
|
||||
{
|
||||
let valuePreimageLength := and(shr(224, calldataload(ptr)), 0xffff)
|
||||
// @todo check CompressedFlag
|
||||
switch byte(0, calldataload(ptr))
|
||||
case 4 {
|
||||
// nonempty leaf node
|
||||
ptr := add(ptr, 0x01) // skip NodeType
|
||||
require(eq(calldataload(ptr), key), "AccountKeyMismatch")
|
||||
ptr := add(ptr, 0x20) // skip NodeKey
|
||||
require(eq(shr(224, calldataload(ptr)), 0x05080000), "InvalidAccountCompressedFlag")
|
||||
ptr := add(ptr, 0x04) // skip CompressedFlag
|
||||
ptr := add(ptr, valuePreimageLength) // skip ValuePreimage
|
||||
}
|
||||
|
||||
// compute value hash for State Account Leaf Node
|
||||
{
|
||||
let tmpHash1 := calldataload(ptr)
|
||||
ptr := add(ptr, 0x20) // skip nonce/codesize/0
|
||||
tmpHash1 := poseidon_hash(poseidon, tmpHash1, calldataload(ptr))
|
||||
// compute value hash for State Account Leaf Node, details can be found in
|
||||
// https://github.com/scroll-tech/mpt-circuit/blob/v0.7/spec/mpt-proof.md#account-segmenttypes
|
||||
// [nonce||codesize||0, balance, storage_root, keccak codehash, poseidon codehash]
|
||||
mstore(0x00, calldataload(ptr))
|
||||
ptr := add(ptr, 0x20) // skip nonce||codesize||0
|
||||
mstore(0x00, poseidon_hash(hasher, mload(0x00), calldataload(ptr), 1280))
|
||||
ptr := add(ptr, 0x20) // skip balance
|
||||
storageHash := calldataload(ptr)
|
||||
storageRootHash := calldataload(ptr)
|
||||
ptr := add(ptr, 0x20) // skip StorageRoot
|
||||
let tmpHash2 := hash_uint256(poseidon, calldataload(ptr))
|
||||
let tmpHash := hash_uint256(hasher, calldataload(ptr))
|
||||
ptr := add(ptr, 0x20) // skip KeccakCodeHash
|
||||
tmpHash2 := poseidon_hash(poseidon, storageHash, tmpHash2)
|
||||
tmpHash2 := poseidon_hash(poseidon, tmpHash1, tmpHash2)
|
||||
tmpHash2 := poseidon_hash(poseidon, tmpHash2, calldataload(ptr))
|
||||
tmpHash := poseidon_hash(hasher, storageRootHash, tmpHash, 1280)
|
||||
tmpHash := poseidon_hash(hasher, mload(0x00), tmpHash, 1280)
|
||||
tmpHash := poseidon_hash(hasher, tmpHash, calldataload(ptr), 1280)
|
||||
ptr := add(ptr, 0x20) // skip PoseidonCodeHash
|
||||
|
||||
tmpHash1 := poseidon_hash(poseidon, 1, key)
|
||||
tmpHash1 := poseidon_hash(poseidon, tmpHash1, tmpHash2)
|
||||
tmpHash := poseidon_hash(hasher, key, tmpHash, 4)
|
||||
require(eq(leafHash, tmpHash), "InvalidAccountLeafNodeHash")
|
||||
|
||||
require(eq(leafHash, tmpHash1), "Invalid leaf node hash")
|
||||
require(eq(0x20, byte(0, calldataload(ptr))), "InvalidAccountKeyPreimageLength")
|
||||
ptr := add(ptr, 0x01) // skip KeyPreimage length
|
||||
require(eq(shl(96, _account), calldataload(ptr)), "InvalidAccountKeyPreimage")
|
||||
ptr := add(ptr, 0x20) // skip KeyPreimage
|
||||
}
|
||||
case 5 {
|
||||
ptr := add(ptr, 0x01) // skip NodeType
|
||||
}
|
||||
default {
|
||||
revertWith("InvalidAccountLeafNodeType")
|
||||
}
|
||||
|
||||
require(eq(0x20, byte(0, calldataload(ptr))), "Invalid KeyPreimage length")
|
||||
ptr := add(ptr, 0x01) // skip KeyPreimage length
|
||||
require(eq(shl(96, account), calldataload(ptr)), "Invalid KeyPreimage")
|
||||
ptr := add(ptr, 0x20) // skip KeyPreimage
|
||||
|
||||
// compare ProofMagicBytes
|
||||
ptr := checkProofMagicBytes(poseidon, ptr)
|
||||
ptr := checkProofMagicBytes(hasher, ptr)
|
||||
}
|
||||
|
||||
// verify storage proof
|
||||
{
|
||||
let leafHash
|
||||
let key := hash_uint256(poseidon, storageKey)
|
||||
{
|
||||
let rootHash
|
||||
ptr, rootHash, leafHash := walkTree(poseidon, key, ptr)
|
||||
function verifyStorageProof(hasher, _storageKey, storageRootHash, _ptr) -> ptr, _storageValue {
|
||||
ptr := _ptr
|
||||
|
||||
switch rootHash
|
||||
case 0 {
|
||||
// in the case that the leaf is the only element, then
|
||||
// the hash of the leaf must match the value from the account leaf
|
||||
require(eq(leafHash, storageHash), "Storage root mismatch")
|
||||
}
|
||||
default {
|
||||
// otherwise the root hash of the storage tree
|
||||
// must match the value from the account leaf
|
||||
require(eq(rootHash, storageHash), "Storage root mismatch")
|
||||
}
|
||||
let leafHash
|
||||
let key := hash_uint256(hasher, _storageKey)
|
||||
let rootHash
|
||||
ptr, rootHash, leafHash := walkTree(hasher, key, ptr)
|
||||
|
||||
// The root hash of the storage tree must match the value from the account leaf.
|
||||
// But when the leaf node is the same as the root node, the function `walkTree` will return
|
||||
// `rootHash=0` and `leafHash=0`. In such case, we don't need to check the value of `rootHash`.
|
||||
// And the value of `leafHash` should be the same as `storageRootHash`.
|
||||
switch rootHash
|
||||
case 0 {
|
||||
leafHash := storageRootHash
|
||||
}
|
||||
default {
|
||||
require(eq(rootHash, storageRootHash), "StorageRootMismatch")
|
||||
}
|
||||
|
||||
switch byte(0, calldataload(ptr))
|
||||
case 1 {
|
||||
case 4 {
|
||||
ptr := add(ptr, 0x01) // skip NodeType
|
||||
require(eq(calldataload(ptr), key), "Node key mismatch")
|
||||
require(eq(calldataload(ptr), key), "StorageKeyMismatch")
|
||||
ptr := add(ptr, 0x20) // skip NodeKey
|
||||
{
|
||||
let valuePreimageLength := and(shr(224, calldataload(ptr)), 0xffff)
|
||||
// @todo check CompressedFlag
|
||||
ptr := add(ptr, 0x04) // skip CompressedFlag
|
||||
ptr := add(ptr, valuePreimageLength) // skip ValuePreimage
|
||||
}
|
||||
|
||||
storageValue := calldataload(ptr)
|
||||
require(eq(shr(224, calldataload(ptr)), 0x01010000), "InvalidStorageCompressedFlag")
|
||||
ptr := add(ptr, 0x04) // skip CompressedFlag
|
||||
_storageValue := calldataload(ptr)
|
||||
ptr := add(ptr, 0x20) // skip StorageValue
|
||||
|
||||
mstore(0x00, hash_uint256(poseidon, storageValue))
|
||||
key := poseidon_hash(poseidon, 1, key)
|
||||
mstore(0x00, poseidon_hash(poseidon, key, mload(0x00)))
|
||||
require(eq(leafHash, mload(0x00)), "Invalid leaf node hash")
|
||||
// compute leaf node hash and compare, details can be found in
|
||||
// https://github.com/scroll-tech/mpt-circuit/blob/v0.7/spec/mpt-proof.md#storage-segmenttypes
|
||||
mstore(0x00, hash_uint256(hasher, _storageValue))
|
||||
mstore(0x00, poseidon_hash(hasher, key, mload(0x00), 4))
|
||||
require(eq(leafHash, mload(0x00)), "InvalidStorageLeafNodeHash")
|
||||
|
||||
require(eq(0x20, byte(0, calldataload(ptr))), "Invalid KeyPreimage length")
|
||||
require(eq(0x20, byte(0, calldataload(ptr))), "InvalidStorageKeyPreimageLength")
|
||||
ptr := add(ptr, 0x01) // skip KeyPreimage length
|
||||
require(eq(storageKey, calldataload(ptr)), "Invalid KeyPreimage")
|
||||
require(eq(_storageKey, calldataload(ptr)), "InvalidStorageKeyPreimage")
|
||||
ptr := add(ptr, 0x20) // skip KeyPreimage
|
||||
}
|
||||
case 2 {
|
||||
case 5 {
|
||||
ptr := add(ptr, 0x01) // skip NodeType
|
||||
require(eq(leafHash, 0), "Invalid empty node hash")
|
||||
require(eq(leafHash, 0), "InvalidStorageEmptyLeafNodeHash")
|
||||
}
|
||||
default {
|
||||
revertWith("Invalid leaf node")
|
||||
revertWith("InvalidStorageLeafNodeType")
|
||||
}
|
||||
|
||||
// compare ProofMagicBytes
|
||||
ptr := checkProofMagicBytes(poseidon, ptr)
|
||||
ptr := checkProofMagicBytes(hasher, ptr)
|
||||
}
|
||||
|
||||
let storageRootHash
|
||||
let ptr := proof.offset
|
||||
|
||||
// check the correctness of account proof
|
||||
ptr, storageRootHash, stateRoot := verifyAccountProof(poseidon, account, ptr)
|
||||
|
||||
// check the correctness of storage proof
|
||||
ptr, storageValue := verifyStorageProof(poseidon, storageKey, storageRootHash, ptr)
|
||||
|
||||
// the one and only boundary check
|
||||
// in case an attacker crafted a malicous payload
|
||||
// and succeeds in the prior verification steps
|
||||
// then this should catch any bogus accesses
|
||||
if iszero(eq(ptr, add(proof.offset, proof.length))) {
|
||||
revertWith("Proof length mismatch")
|
||||
revertWith("ProofLengthMismatch")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
154
contracts/src/lido/L1LidoGateway.sol
Normal file
154
contracts/src/lido/L1LidoGateway.sol
Normal file
@@ -0,0 +1,154 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {IL1ERC20Gateway} from "../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import {L1ERC20Gateway} from "../L1/gateways/L1ERC20Gateway.sol";
|
||||
import {IL1ScrollMessenger} from "../L1/IL1ScrollMessenger.sol";
|
||||
import {IL2ERC20Gateway} from "../L2/gateways/IL2ERC20Gateway.sol";
|
||||
import {ScrollGatewayBase} from "../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
import {LidoBridgeableTokens} from "./LidoBridgeableTokens.sol";
|
||||
import {LidoGatewayManager} from "./LidoGatewayManager.sol";
|
||||
|
||||
contract L1LidoGateway is L1ERC20Gateway, LidoBridgeableTokens, LidoGatewayManager {
|
||||
/**********
|
||||
* Errors *
|
||||
**********/
|
||||
|
||||
/// @dev Thrown when deposit zero amount token.
|
||||
error ErrorDepositZeroAmount();
|
||||
|
||||
/// @dev Thrown when deposit erc20 with calldata.
|
||||
error DepositAndCallIsNotAllowed();
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @dev The initial version of `L1LidoGateway` use `L1CustomERC20Gateway`. We keep the storage
|
||||
/// slot for `tokenMapping` for compatibility. It should no longer be used.
|
||||
mapping(address => address) private __tokenMapping;
|
||||
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
/// @notice Constructor for `L1LidoGateway` implementation contract.
|
||||
///
|
||||
/// @param _l1Token The address of the bridged token in the L1 chain
|
||||
/// @param _l2Token The address of the token minted on the L2 chain when token bridged
|
||||
/// @param _counterpart The address of `L2LidoGateway` contract in L2.
|
||||
/// @param _router The address of `L1GatewayRouter` contract.
|
||||
/// @param _messenger The address of `L1ScrollMessenger` contract.
|
||||
constructor(
|
||||
address _l1Token,
|
||||
address _l2Token,
|
||||
address _counterpart,
|
||||
address _router,
|
||||
address _messenger
|
||||
) LidoBridgeableTokens(_l1Token, _l2Token) ScrollGatewayBase(_counterpart, _router, _messenger) {
|
||||
if (_l1Token == address(0) || _l2Token == address(0) || _router == address(0)) {
|
||||
revert ErrorZeroAddress();
|
||||
}
|
||||
|
||||
_disableInitializers();
|
||||
}
|
||||
|
||||
/// @notice Initialize the storage of L1LidoGateway v1.
|
||||
///
|
||||
/// @dev The parameters `_counterpart`, `_router` and `_messenger` are no longer used.
|
||||
///
|
||||
/// @param _counterpart The address of `L2LidoGateway` contract in L2.
|
||||
/// @param _router The address of `L1GatewayRouter` contract.
|
||||
/// @param _messenger The address of `L1ScrollMessenger` contract.
|
||||
function initialize(
|
||||
address _counterpart,
|
||||
address _router,
|
||||
address _messenger
|
||||
) external initializer {
|
||||
ScrollGatewayBase._initialize(_counterpart, _router, _messenger);
|
||||
}
|
||||
|
||||
/// @notice Initialize the storage of L1LidoGateway v2.
|
||||
/// @param _depositsEnabler The address of user who can enable deposits
|
||||
/// @param _depositsEnabler The address of user who can disable deposits
|
||||
/// @param _withdrawalsEnabler The address of user who can enable withdrawals
|
||||
/// @param _withdrawalsDisabler The address of user who can disable withdrawals
|
||||
function initializeV2(
|
||||
address _depositsEnabler,
|
||||
address _depositsDisabler,
|
||||
address _withdrawalsEnabler,
|
||||
address _withdrawalsDisabler
|
||||
) external reinitializer(2) {
|
||||
__LidoGatewayManager_init(_depositsEnabler, _depositsDisabler, _withdrawalsEnabler, _withdrawalsDisabler);
|
||||
}
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @inheritdoc IL1ERC20Gateway
|
||||
function getL2ERC20Address(address _l1Token)
|
||||
external
|
||||
view
|
||||
override
|
||||
onlySupportedL1Token(_l1Token)
|
||||
returns (address)
|
||||
{
|
||||
return l2Token;
|
||||
}
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @inheritdoc L1ERC20Gateway
|
||||
/// @dev The length of `_data` always be zero, which guarantee by `L2LidoGateway`.
|
||||
function _beforeFinalizeWithdrawERC20(
|
||||
address _l1Token,
|
||||
address _l2Token,
|
||||
address,
|
||||
address,
|
||||
uint256,
|
||||
bytes calldata
|
||||
) internal virtual override onlySupportedL1Token(_l1Token) onlySupportedL2Token(_l2Token) whenWithdrawalsEnabled {
|
||||
if (msg.value != 0) revert ErrorNonZeroMsgValue();
|
||||
}
|
||||
|
||||
/// @inheritdoc L1ERC20Gateway
|
||||
function _beforeDropMessage(
|
||||
address _token,
|
||||
address,
|
||||
uint256
|
||||
) internal virtual override onlySupportedL1Token(_token) {
|
||||
if (msg.value != 0) revert ErrorNonZeroMsgValue();
|
||||
}
|
||||
|
||||
/// @inheritdoc L1ERC20Gateway
|
||||
function _deposit(
|
||||
address _token,
|
||||
address _to,
|
||||
uint256 _amount,
|
||||
bytes memory _data,
|
||||
uint256 _gasLimit
|
||||
) internal virtual override nonReentrant onlySupportedL1Token(_token) onlyNonZeroAccount(_to) whenDepositsEnabled {
|
||||
if (_amount == 0) revert ErrorDepositZeroAmount();
|
||||
|
||||
// 1. Transfer token into this contract.
|
||||
address _from;
|
||||
(_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
|
||||
if (_data.length != 0) revert DepositAndCallIsNotAllowed();
|
||||
|
||||
// 2. Generate message passed to L2LidoGateway.
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL2ERC20Gateway.finalizeDepositERC20,
|
||||
(_token, l2Token, _from, _to, _amount, _data)
|
||||
);
|
||||
|
||||
// 3. Send message to L1ScrollMessenger.
|
||||
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, _from);
|
||||
|
||||
emit DepositERC20(_token, l2Token, _from, _to, _amount, _data);
|
||||
}
|
||||
}
|
||||
187
contracts/src/lido/L2LidoGateway.sol
Normal file
187
contracts/src/lido/L2LidoGateway.sol
Normal file
@@ -0,0 +1,187 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {IL1ERC20Gateway} from "../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import {IL2ERC20Gateway} from "../L2/gateways/IL2ERC20Gateway.sol";
|
||||
import {L2ERC20Gateway} from "../L2/gateways/L2ERC20Gateway.sol";
|
||||
import {IL2ScrollMessenger} from "../L2/IL2ScrollMessenger.sol";
|
||||
import {IScrollERC20Upgradeable} from "../libraries/token/IScrollERC20Upgradeable.sol";
|
||||
import {ScrollGatewayBase} from "../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
import {LidoBridgeableTokens} from "./LidoBridgeableTokens.sol";
|
||||
import {LidoGatewayManager} from "./LidoGatewayManager.sol";
|
||||
|
||||
contract L2LidoGateway is L2ERC20Gateway, LidoBridgeableTokens, LidoGatewayManager {
|
||||
/**********
|
||||
* Errors *
|
||||
**********/
|
||||
|
||||
/// @dev Thrown when withdraw zero amount token.
|
||||
error ErrorWithdrawZeroAmount();
|
||||
|
||||
/// @dev Thrown when withdraw erc20 with calldata.
|
||||
error WithdrawAndCallIsNotAllowed();
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @dev The initial version of `L2LidoGateway` use `L2CustomERC20Gateway`. We keep the storage
|
||||
/// slot for `tokenMapping` for compatibility. It should no longer be used.
|
||||
mapping(address => address) private __tokenMapping;
|
||||
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
/// @notice Constructor for `L2LidoGateway` implementation contract.
|
||||
///
|
||||
/// @param _l1Token The address of the bridged token in the L1 chain
|
||||
/// @param _l2Token The address of the token minted on the L2 chain when token bridged
|
||||
/// @param _counterpart The address of `L1LidoGateway` contract in L1.
|
||||
/// @param _router The address of `L2GatewayRouter` contract in L2.
|
||||
/// @param _messenger The address of `L2ScrollMessenger` contract in L2.
|
||||
constructor(
|
||||
address _l1Token,
|
||||
address _l2Token,
|
||||
address _counterpart,
|
||||
address _router,
|
||||
address _messenger
|
||||
) LidoBridgeableTokens(_l1Token, _l2Token) ScrollGatewayBase(_counterpart, _router, _messenger) {
|
||||
if (_l1Token == address(0) || _l2Token == address(0) || _router == address(0)) {
|
||||
revert ErrorZeroAddress();
|
||||
}
|
||||
|
||||
_disableInitializers();
|
||||
}
|
||||
|
||||
/// @notice Initialize the storage of L2LidoGateway v1.
|
||||
///
|
||||
/// @dev The parameters `_counterpart`, `_router` and `_messenger` are no longer used.
|
||||
///
|
||||
/// @param _counterpart The address of `L1LidoGateway` contract in L1.
|
||||
/// @param _router The address of `L2GatewayRouter` contract in L2.
|
||||
/// @param _messenger The address of `L2ScrollMessenger` contract in L2.
|
||||
function initialize(
|
||||
address _counterpart,
|
||||
address _router,
|
||||
address _messenger
|
||||
) external initializer {
|
||||
ScrollGatewayBase._initialize(_counterpart, _router, _messenger);
|
||||
}
|
||||
|
||||
/// @notice Initialize the storage of L2LidoGateway v2.
|
||||
/// @param _depositsEnabler The address of user who can enable deposits
|
||||
/// @param _depositsEnabler The address of user who can disable deposits
|
||||
/// @param _withdrawalsEnabler The address of user who can enable withdrawals
|
||||
/// @param _withdrawalsDisabler The address of user who can disable withdrawals
|
||||
function initializeV2(
|
||||
address _depositsEnabler,
|
||||
address _depositsDisabler,
|
||||
address _withdrawalsEnabler,
|
||||
address _withdrawalsDisabler
|
||||
) external reinitializer(2) {
|
||||
__LidoGatewayManager_init(_depositsEnabler, _depositsDisabler, _withdrawalsEnabler, _withdrawalsDisabler);
|
||||
}
|
||||
|
||||
/*************************
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @inheritdoc IL2ERC20Gateway
|
||||
function getL1ERC20Address(address _l2Token)
|
||||
external
|
||||
view
|
||||
override
|
||||
onlySupportedL2Token(_l2Token)
|
||||
returns (address)
|
||||
{
|
||||
return l1Token;
|
||||
}
|
||||
|
||||
/// @inheritdoc IL2ERC20Gateway
|
||||
function getL2ERC20Address(address _l1Token)
|
||||
external
|
||||
view
|
||||
override
|
||||
onlySupportedL1Token(_l1Token)
|
||||
returns (address)
|
||||
{
|
||||
return l2Token;
|
||||
}
|
||||
|
||||
/*****************************
|
||||
* Public Mutating Functions *
|
||||
*****************************/
|
||||
|
||||
/// @inheritdoc IL2ERC20Gateway
|
||||
/// @dev The length of `_data` always be zero, which guarantee by `L1LidoGateway`.
|
||||
function finalizeDepositERC20(
|
||||
address _l1Token,
|
||||
address _l2Token,
|
||||
address _from,
|
||||
address _to,
|
||||
uint256 _amount,
|
||||
bytes calldata _data
|
||||
)
|
||||
external
|
||||
payable
|
||||
override
|
||||
onlyCallByCounterpart
|
||||
nonReentrant
|
||||
onlySupportedL1Token(_l1Token)
|
||||
onlySupportedL2Token(_l2Token)
|
||||
whenDepositsEnabled
|
||||
{
|
||||
if (msg.value != 0) revert ErrorNonZeroMsgValue();
|
||||
|
||||
IScrollERC20Upgradeable(_l2Token).mint(_to, _amount);
|
||||
|
||||
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
|
||||
}
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @inheritdoc L2ERC20Gateway
|
||||
function _withdraw(
|
||||
address _l2Token,
|
||||
address _to,
|
||||
uint256 _amount,
|
||||
bytes memory _data,
|
||||
uint256 _gasLimit
|
||||
)
|
||||
internal
|
||||
virtual
|
||||
override
|
||||
nonReentrant
|
||||
onlySupportedL2Token(_l2Token)
|
||||
onlyNonZeroAccount(_to)
|
||||
whenWithdrawalsEnabled
|
||||
{
|
||||
if (_amount == 0) revert ErrorWithdrawZeroAmount();
|
||||
|
||||
// 1. Extract real sender if this call is from L2GatewayRouter.
|
||||
address _from = _msgSender();
|
||||
if (router == _from) {
|
||||
(_from, _data) = abi.decode(_data, (address, bytes));
|
||||
}
|
||||
if (_data.length != 0) revert WithdrawAndCallIsNotAllowed();
|
||||
|
||||
// 2. Burn token.
|
||||
IScrollERC20Upgradeable(_l2Token).burn(_from, _amount);
|
||||
|
||||
// 3. Generate message passed to L1LidoGateway.
|
||||
bytes memory _message = abi.encodeCall(
|
||||
IL1ERC20Gateway.finalizeWithdrawERC20,
|
||||
(l1Token, _l2Token, _from, _to, _amount, _data)
|
||||
);
|
||||
|
||||
// 4. send message to L2ScrollMessenger
|
||||
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
|
||||
|
||||
emit WithdrawERC20(l1Token, _l2Token, _from, _to, _amount, _data);
|
||||
}
|
||||
}
|
||||
49
contracts/src/lido/L2WstETHToken.sol
Normal file
49
contracts/src/lido/L2WstETHToken.sol
Normal file
@@ -0,0 +1,49 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {IERC20PermitUpgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/extensions/IERC20PermitUpgradeable.sol";
|
||||
import {ERC20PermitUpgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/extensions/ERC20PermitUpgradeable.sol";
|
||||
import {SignatureCheckerUpgradeable} from "@openzeppelin/contracts-upgradeable/utils/cryptography/SignatureCheckerUpgradeable.sol";
|
||||
|
||||
import {ScrollStandardERC20} from "../libraries/token/ScrollStandardERC20.sol";
|
||||
|
||||
contract L2WstETHToken is ScrollStandardERC20 {
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
|
||||
/// @dev See {ERC20PermitUpgradeable-_PERMIT_TYPEHASH}
|
||||
bytes32 private constant _PERMIT_TYPEHASH =
|
||||
keccak256("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)");
|
||||
|
||||
/*****************************
|
||||
* Public Mutating Functions *
|
||||
*****************************/
|
||||
|
||||
/// @inheritdoc IERC20PermitUpgradeable
|
||||
///
|
||||
/// @dev The code is copied from `ERC20PermitUpgradeable` with modifications to support ERC-1271.
|
||||
function permit(
|
||||
address owner,
|
||||
address spender,
|
||||
uint256 value,
|
||||
uint256 deadline,
|
||||
uint8 v,
|
||||
bytes32 r,
|
||||
bytes32 s
|
||||
) public virtual override(ERC20PermitUpgradeable, IERC20PermitUpgradeable) {
|
||||
require(block.timestamp <= deadline, "ERC20Permit: expired deadline");
|
||||
|
||||
bytes32 structHash = keccak256(abi.encode(_PERMIT_TYPEHASH, owner, spender, value, _useNonce(owner), deadline));
|
||||
|
||||
bytes32 hash = _hashTypedDataV4(structHash);
|
||||
|
||||
require(
|
||||
SignatureCheckerUpgradeable.isValidSignatureNow(owner, hash, abi.encodePacked(r, s, v)),
|
||||
"ERC20Permit: invalid signature"
|
||||
);
|
||||
|
||||
_approve(owner, spender, value);
|
||||
}
|
||||
}
|
||||
70
contracts/src/lido/LidoBridgeableTokens.sol
Normal file
70
contracts/src/lido/LidoBridgeableTokens.sol
Normal file
@@ -0,0 +1,70 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
abstract contract LidoBridgeableTokens {
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
|
||||
/// @notice The address of bridged token in L1 chain.
|
||||
address public immutable l1Token;
|
||||
|
||||
/// @notice The address of the token minted on the L2 chain when token bridged.
|
||||
address public immutable l2Token;
|
||||
|
||||
/**********
|
||||
* Errors *
|
||||
**********/
|
||||
|
||||
/// @dev Thrown the given `l1Token` is not supported.
|
||||
error ErrorUnsupportedL1Token();
|
||||
|
||||
/// @dev Thrown the given `l2Token` is not supported.
|
||||
error ErrorUnsupportedL2Token();
|
||||
|
||||
/// @dev Thrown the given account is zero address.
|
||||
error ErrorAccountIsZeroAddress();
|
||||
|
||||
/// @dev Thrown the `msg.value` is not zero.
|
||||
error ErrorNonZeroMsgValue();
|
||||
|
||||
/**********************
|
||||
* Function Modifiers *
|
||||
**********************/
|
||||
|
||||
/// @dev Validates that passed `_l1Token` is supported by the bridge
|
||||
modifier onlySupportedL1Token(address _l1Token) {
|
||||
if (_l1Token != l1Token) {
|
||||
revert ErrorUnsupportedL1Token();
|
||||
}
|
||||
_;
|
||||
}
|
||||
|
||||
/// @dev Validates that passed `_l2Token` is supported by the bridge
|
||||
modifier onlySupportedL2Token(address _l2Token) {
|
||||
if (_l2Token != l2Token) {
|
||||
revert ErrorUnsupportedL2Token();
|
||||
}
|
||||
_;
|
||||
}
|
||||
|
||||
/// @dev validates that `_account` is not zero address
|
||||
modifier onlyNonZeroAccount(address _account) {
|
||||
if (_account == address(0)) {
|
||||
revert ErrorAccountIsZeroAddress();
|
||||
}
|
||||
_;
|
||||
}
|
||||
|
||||
/***************
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
/// @param _l1Token The address of the bridged token in the L1 chain
|
||||
/// @param _l2Token The address of the token minted on the L2 chain when token bridged
|
||||
constructor(address _l1Token, address _l2Token) {
|
||||
l1Token = _l1Token;
|
||||
l2Token = _l2Token;
|
||||
}
|
||||
}
|
||||
288
contracts/src/lido/LidoGatewayManager.sol
Normal file
288
contracts/src/lido/LidoGatewayManager.sol
Normal file
@@ -0,0 +1,288 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {EnumerableSetUpgradeable} from "@openzeppelin/contracts-upgradeable/utils/structs/EnumerableSetUpgradeable.sol";
|
||||
|
||||
import {ScrollGatewayBase} from "../libraries/gateway/ScrollGatewayBase.sol";
|
||||
|
||||
// solhint-disable func-name-mixedcase
|
||||
|
||||
/// @dev Mixin that adds Lido-style bridging administration to a Scroll gateway:
///      deposits/withdrawals can each be enabled or disabled by dedicated role
///      holders, with role membership managed by the owner. State lives in an
///      unstructured storage slot so the mixin is upgrade-safe.
abstract contract LidoGatewayManager is ScrollGatewayBase {
    using EnumerableSetUpgradeable for EnumerableSetUpgradeable.AddressSet;

    /**********
     * Events *
     **********/

    /// @notice Emitted when a caller enables deposits.
    /// @param enabler The address of the caller.
    event DepositsEnabled(address indexed enabler);

    /// @notice Emitted when a caller disables deposits.
    /// @param disabler The address of the caller.
    event DepositsDisabled(address indexed disabler);

    /// @notice Emitted when a caller enables withdrawals.
    /// @param enabler The address of the caller.
    event WithdrawalsEnabled(address indexed enabler);

    /// @notice Emitted when a caller disables withdrawals.
    /// @param disabler The address of the caller.
    event WithdrawalsDisabled(address indexed disabler);

    /// @notice Emitted when `account` is granted `role`.
    ///
    /// @param role The role granted.
    /// @param account The address of the account granted the role.
    /// @param sender The address performing the grant.
    event RoleGranted(bytes32 indexed role, address indexed account, address indexed sender);

    /// @notice Emitted when `role` is revoked from `account`.
    ///
    /// @param role The role revoked.
    /// @param account The address of the account the role is revoked from.
    /// @param sender The address performing the revocation.
    event RoleRevoked(bytes32 indexed role, address indexed account, address indexed sender);

    /**********
     * Errors *
     **********/

    /// @dev Thrown when deposits are already enabled and the caller tries to enable them again.
    error ErrorDepositsEnabled();

    /// @dev Thrown when deposits are disabled and the caller tries a deposit-related operation.
    error ErrorDepositsDisabled();

    /// @dev Thrown when withdrawals are already enabled and the caller tries to enable them again.
    error ErrorWithdrawalsEnabled();

    /// @dev Thrown when withdrawals are disabled and the caller tries a withdrawal-related operation.
    error ErrorWithdrawalsDisabled();

    /// @dev Thrown when the caller does not hold the deposits enabler role.
    error ErrorCallerIsNotDepositsEnabler();

    /// @dev Thrown when the caller does not hold the deposits disabler role.
    error ErrorCallerIsNotDepositsDisabler();

    /// @dev Thrown when the caller does not hold the withdrawals enabler role.
    error ErrorCallerIsNotWithdrawalsEnabler();

    /// @dev Thrown when the caller does not hold the withdrawals disabler role.
    error ErrorCallerIsNotWithdrawalsDisabler();

    /***********
     * Structs *
     ***********/

    /// @dev Stores the state of the bridging.
    /// @param isDepositsEnabled Whether deposits are currently enabled.
    /// @param isWithdrawalsEnabled Whether withdrawals are currently enabled.
    /// @param roles Mapping from role to the set of role members.
    struct State {
        bool isDepositsEnabled;
        bool isWithdrawalsEnabled;
        mapping(bytes32 => EnumerableSetUpgradeable.AddressSet) roles;
    }

    /*************
     * Constants *
     *************/

    /// @dev The location of the storage slot holding the `State` struct
    ///      (unstructured storage, dereferenced via `_loadState`).
    bytes32 private constant STATE_SLOT = keccak256("LidoGatewayManager.bridgingState");

    /// @notice The role for the deposits enabler.
    bytes32 public constant DEPOSITS_ENABLER_ROLE = keccak256("BridgingManager.DEPOSITS_ENABLER_ROLE");

    /// @notice The role for the deposits disabler.
    bytes32 public constant DEPOSITS_DISABLER_ROLE = keccak256("BridgingManager.DEPOSITS_DISABLER_ROLE");

    /// @notice The role for the withdrawals enabler.
    bytes32 public constant WITHDRAWALS_ENABLER_ROLE = keccak256("BridgingManager.WITHDRAWALS_ENABLER_ROLE");

    /// @notice The role for the withdrawals disabler.
    bytes32 public constant WITHDRAWALS_DISABLER_ROLE = keccak256("BridgingManager.WITHDRAWALS_DISABLER_ROLE");

    /**********************
     * Function Modifiers *
     **********************/

    /// @dev Validates that deposits are enabled; reverts with `ErrorDepositsDisabled` otherwise.
    modifier whenDepositsEnabled() {
        if (!isDepositsEnabled()) revert ErrorDepositsDisabled();
        _;
    }

    /// @dev Validates that withdrawals are enabled; reverts with `ErrorWithdrawalsDisabled` otherwise.
    modifier whenWithdrawalsEnabled() {
        if (!isWithdrawalsEnabled()) revert ErrorWithdrawalsDisabled();
        _;
    }

    /***************
     * Constructor *
     ***************/

    /// @notice Initialize the storage of LidoGatewayManager.
    /// @dev Enables both deposits and withdrawals and grants the four admin roles.
    /// @param _depositsEnabler The address of the user who can enable deposits
    /// @param _depositsDisabler The address of the user who can disable deposits
    /// @param _withdrawalsEnabler The address of the user who can enable withdrawals
    /// @param _withdrawalsDisabler The address of the user who can disable withdrawals
    function __LidoGatewayManager_init(
        address _depositsEnabler,
        address _depositsDisabler,
        address _withdrawalsEnabler,
        address _withdrawalsDisabler
    ) internal onlyInitializing {
        State storage s = _loadState();

        s.isDepositsEnabled = true;
        emit DepositsEnabled(_msgSender());

        s.isWithdrawalsEnabled = true;
        emit WithdrawalsEnabled(_msgSender());

        _grantRole(DEPOSITS_ENABLER_ROLE, _depositsEnabler);
        _grantRole(DEPOSITS_DISABLER_ROLE, _depositsDisabler);
        _grantRole(WITHDRAWALS_ENABLER_ROLE, _withdrawalsEnabler);
        _grantRole(WITHDRAWALS_DISABLER_ROLE, _withdrawalsDisabler);
    }

    /*************************
     * Public View Functions *
     *************************/

    /// @notice Returns whether the deposits are enabled or not.
    function isDepositsEnabled() public view returns (bool) {
        return _loadState().isDepositsEnabled;
    }

    /// @notice Returns whether the withdrawals are enabled or not.
    function isWithdrawalsEnabled() public view returns (bool) {
        return _loadState().isWithdrawalsEnabled;
    }

    /// @notice Returns `true` if `_account` has been granted `_role`.
    function hasRole(bytes32 _role, address _account) public view returns (bool) {
        return _loadState().roles[_role].contains(_account);
    }

    /// @notice Returns one of the accounts that have `_role`.
    ///
    /// @param _role The role to query.
    /// @param _index The index of account to query. It must be a value between 0 and {getRoleMemberCount}, non-inclusive.
    function getRoleMember(bytes32 _role, uint256 _index) external view returns (address) {
        return _loadState().roles[_role].at(_index);
    }

    /// @notice Returns the number of accounts that have `role`.
    ///
    /// @dev Can be used together with {getRoleMember} to enumerate all bearers of a role.
    ///
    /// @param _role The role to query.
    function getRoleMemberCount(bytes32 _role) external view returns (uint256) {
        return _loadState().roles[_role].length();
    }

    /************************
     * Restricted Functions *
     ************************/

    /// @notice Enables the deposits if they are disabled.
    /// @dev Reverts if deposits are already enabled or if the caller lacks DEPOSITS_ENABLER_ROLE.
    function enableDeposits() external {
        if (isDepositsEnabled()) revert ErrorDepositsEnabled();
        if (!hasRole(DEPOSITS_ENABLER_ROLE, _msgSender())) {
            revert ErrorCallerIsNotDepositsEnabler();
        }

        _loadState().isDepositsEnabled = true;
        emit DepositsEnabled(_msgSender());
    }

    /// @notice Disables the deposits if they aren't disabled yet.
    /// @dev Reverts (via `whenDepositsEnabled`) if deposits are already disabled,
    ///      or if the caller lacks DEPOSITS_DISABLER_ROLE.
    function disableDeposits() external whenDepositsEnabled {
        if (!hasRole(DEPOSITS_DISABLER_ROLE, _msgSender())) {
            revert ErrorCallerIsNotDepositsDisabler();
        }

        _loadState().isDepositsEnabled = false;
        emit DepositsDisabled(_msgSender());
    }

    /// @notice Enables the withdrawals if they are disabled.
    /// @dev Reverts if withdrawals are already enabled or if the caller lacks WITHDRAWALS_ENABLER_ROLE.
    function enableWithdrawals() external {
        if (isWithdrawalsEnabled()) revert ErrorWithdrawalsEnabled();
        if (!hasRole(WITHDRAWALS_ENABLER_ROLE, _msgSender())) {
            revert ErrorCallerIsNotWithdrawalsEnabler();
        }

        _loadState().isWithdrawalsEnabled = true;
        emit WithdrawalsEnabled(_msgSender());
    }

    /// @notice Disables the withdrawals if they aren't disabled yet.
    /// @dev Reverts (via `whenWithdrawalsEnabled`) if withdrawals are already disabled,
    ///      or if the caller lacks WITHDRAWALS_DISABLER_ROLE.
    function disableWithdrawals() external whenWithdrawalsEnabled {
        if (!hasRole(WITHDRAWALS_DISABLER_ROLE, _msgSender())) {
            revert ErrorCallerIsNotWithdrawalsDisabler();
        }

        _loadState().isWithdrawalsEnabled = false;
        emit WithdrawalsDisabled(_msgSender());
    }

    /// @notice Grants `_role` to `_account`. Only callable by the owner.
    /// If `account` had not already been granted `role`, emits a {RoleGranted} event.
    ///
    /// @param _role The role to grant.
    /// @param _account The address of the account to grant the role to.
    function grantRole(bytes32 _role, address _account) external onlyOwner {
        _grantRole(_role, _account);
    }

    /// @notice Revokes `_role` from `_account`. Only callable by the owner.
    /// If `account` had been granted `role`, emits a {RoleRevoked} event.
    ///
    /// @param _role The role to revoke.
    /// @param _account The address of the account to revoke the role from.
    function revokeRole(bytes32 _role, address _account) external onlyOwner {
        _revokeRole(_role, _account);
    }

    /**********************
     * Internal Functions *
     **********************/

    /// @dev Returns the storage reference to the `State` struct located at `STATE_SLOT`.
    function _loadState() private pure returns (State storage r) {
        bytes32 slot = STATE_SLOT;
        // solhint-disable-next-line no-inline-assembly
        assembly {
            r.slot := slot
        }
    }

    /// @dev Internal function to grant `_role` to `_account`.
    /// Emits a {RoleGranted} event only when `_account` was not already a member of `_role`
    /// (EnumerableSet.add returns false for an existing member).
    ///
    /// @param _role The role to grant.
    /// @param _account The address of the account to grant the role to.
    function _grantRole(bytes32 _role, address _account) internal {
        if (_loadState().roles[_role].add(_account)) {
            emit RoleGranted(_role, _account, _msgSender());
        }
    }

    /// @dev Internal function to revoke `_role` from `_account`.
    /// Emits a {RoleRevoked} event only when `_account` was a member of `_role`
    /// (EnumerableSet.remove returns false for a non-member).
    ///
    /// @param _role The role to revoke.
    /// @param _account The address of the account to revoke the role from.
    function _revokeRole(bytes32 _role, address _account) internal {
        if (_loadState().roles[_role].remove(_account)) {
            emit RoleRevoked(_role, _account, _msgSender());
        }
    }
}
|
||||
105
contracts/src/lido/README.md
Normal file
105
contracts/src/lido/README.md
Normal file
@@ -0,0 +1,105 @@
|
||||
# Lido's Scroll Bridge
|
||||
|
||||
The document details the implementation of the bridging of the ERC20 compatible tokens[^*] between Ethereum and Scroll chains.
|
||||
|
||||
It's the first step of Lido's integration into the Scroll protocol. The main goal of the current implementation is to be the strong foundation for the long-term goals of the Lido expansion in the Scroll chain. The long-run picture of the Lido's integration into L2s includes:
|
||||
|
||||
- Bridging of Lido's tokens from L1 to L2 chains
|
||||
- Instant ETH staking on L2 chains with receiving stETH/wstETH on the corresponding L2 immediately
|
||||
- Keeping UX on L2 as close as possible to the UX on Ethereum mainnet
|
||||
|
||||
At this point, the implementation must provide a scalable and reliable solution for Lido to bridge ERC20 compatible tokens between Scroll and the Ethereum chain.
|
||||
|
||||
[^*]: The current implementation might not support the non-standard functionality of the ERC20 tokens. For example, rebasable tokens or tokens with transfers fee will work incorrectly. In case your token implements some non-typical ERC20 logic, make sure it is compatible with the bridge before usage.
|
||||
|
||||
## Security surface overview
|
||||
|
||||
| Statement | Answer |
|
||||
| -------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| It is possible to bridge wstETH forth and back using this bridge | Yes |
|
||||
| The bridge using a canonical mechanism for message/value passing | Yes |
|
||||
| The bridge is upgradeable | Yes |
|
||||
| Upgrade authority for the bridge | TBA |
|
||||
| Emergency pause/cancel mechanisms and their authorities | TBA |
|
||||
| The bridged token support permits and ERC-1271 | Yes |
|
||||
| Are the following things in the scope of this bridge deployment: | |
|
||||
| - Passing the (w)stETH/USD price feed | No |
|
||||
| - Passing Lido DAO governance decisions | [Lido DAO Agent](https://etherscan.io/address/0x3e40D73EB977Dc6a537aF587D48316feE66E9C8c) representation [on Scroll (address TBD)] via [ScrollBridgeExecutor](https://github.com/scroll-tech/governance-crosschain-bridges/blob/scroll/contracts/bridges/ScrollBridgeExecutor.sol) |
|
||||
| Bridges are complicated in that the transaction can succeed on one side and fail on the other. What's the handling mechanism for this issue? | TBA |
|
||||
| Is there a deployment script that sets all the parameters and authorities correctly? | No, we are upgrading from existing gateway, will need to involve multisig operation by Scroll |
|
||||
| Is there a post-deploy check script that, given a deployment, checks that all parameters and authorities are set correctly? | No |
|
||||
|
||||
## Scroll's Bridging Flow
|
||||
|
||||
The default implementation of the Scroll bridging solution consists of two parts: `L1StandardERC20Gateway` and `L2StandardERC20Gateway`. These contracts allow bridging the ERC20 tokens between Ethereum and Scroll chains.
|
||||
|
||||
In the standard bridge, when an ERC20 is deposited on L1 and transferred to the bridge contract, it remains "locked" there while the equivalent amount is minted in the L2 token. For withdrawals, the opposite happens: the L2 token amount is burned, then the same amount of L1 tokens is transferred to the recipient.
|
||||
|
||||
The default Scroll bridge is suitable for the short-term goal of the Lido (bridging of the wstETH token into Scroll), but it complicates the achievement of the long-term goals. For example, implementation of the staking from L2's very likely will require extending the token and gateway implementations.
|
||||
|
||||
Additionally, Scroll provides functionality to implement the custom bridge solution utilizing the same cross-domain infrastructure as the Standard bridge. The only constraint for the custom bridge to be compatible with the default Scroll Gateway is the implementation of the `IL1ERC20Gateway` and `IL2ERC20Gateway` interfaces.
|
||||
|
||||
The rest of the document provides a technical specification of the bridge Lido will use to transfer tokens between Ethereum and Scroll chains.
|
||||
|
||||
## Lido's Bridge Implementation
|
||||
|
||||
The current implementation of the tokens bridge provides functionality to bridge the specified type of ERC20 compatible token between Ethereum and Scroll chains. Additionally, the bridge provides some administrative features, like the **temporary disabling of the deposits and withdrawals**. It's necessary when bridging must be disabled fast because of the malicious usage of the bridge or vulnerability in the contracts. Also, it might be helpful in the implementation upgrade process.
|
||||
|
||||
The technical implementation focuses on the following requirements for the contracts:
|
||||
|
||||
- **Scalability** - current implementation must provide the ability to be extended with new functionality in the future.
|
||||
- **Simplicity** - implemented contracts must be clear, simple, and expressive for developers who will work with code in the future.
|
||||
- **Gas efficiency** - implemented solution must be efficient in terms of gas costs for the end-user, but at the same time, it must not violate the previous requirement.
|
||||
|
||||
A high-level overview of the proposed solution might be found in the below diagram:
|
||||
|
||||

|
||||
|
||||
- [**`LidoGatewayManager`**](./LidoGatewayManager.sol) - contains administrative methods to retrieve and control the state of the bridging process.
|
||||
- [**`LidoBridgeableTokens`**](./LidoBridgeableTokens.sol) - contains the logic for validation of tokens used in the bridging process.
|
||||
- [**`L1LidoGateway`**](./L1LidoGateway.sol) - Ethereum's counterpart of the bridge to bridge registered ERC20 compatible tokens between Ethereum and Scroll chains.
|
||||
- [**`L2LidoGateway`**](./L2LidoGateway.sol) - Scroll's counterpart of the bridge to bridge registered ERC20 compatible tokens between Ethereum and Scroll chains
|
||||
- [**`ScrollStandardERC20`**](../libraries/token/ScrollStandardERC20.sol) - an implementation of the `ERC20` token with administrative methods to mint and burn tokens.
|
||||
- [**`TransparentUpgradeableProxy`**](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/proxy/transparent/TransparentUpgradeableProxy.sol) - the ERC1967 proxy with extra admin functionality.
|
||||
|
||||
## Scroll's Bridging Flow
|
||||
|
||||
The general process of bridging tokens via Scroll's Lido bridge can be found here: [ETH and ERC20 Token Bridge](https://docs.scroll.io/en/developers/l1-and-l2-bridging/eth-and-erc20-token-bridge/).
|
||||
|
||||
## Deployment Process
|
||||
|
||||
To reduce the gas costs for users, contracts `L1LidoGateway`, `L2LidoGateway`, and `ScrollStandardERC20` contracts use immutable variables as much as possible. But some of those variables are cross-referred. For example, `L1LidoGateway` has reference to `L2LidoGateway` and vice versa. As we use proxies, we can deploy proxies at first and without calling the `initialize` function from each gateway. Then call the `initialize` function with correct contract addresses.
|
||||
|
||||
Another option is to pre-calculate the future address of the deployed contract offchain and deploy the implementation using the pre-calculated addresses. But it is less fault-tolerant than the solution above.
|
||||
|
||||
## Integration Risks
|
||||
|
||||
As an additional link in the tokens flow chain, the Scroll protocol and bridges add points of failure. Below are the main risks of the current integration:
|
||||
|
||||
### Minting of uncollateralized L2 token
|
||||
|
||||
Such an attack might happen if an attacker obtains the right to call `L2LidoGateway.finalizeDepositERC20()` directly. In such a scenario, an attacker can mint uncollaterized tokens on L2 and initiate withdrawal later.
|
||||
|
||||
The best way to detect such an attack is an offchain monitoring of the minting and depositing/withdrawal events. Based on such events might be tracked following stats:
|
||||
|
||||
- `l1ERC20TokenBridgeBalance` - a total number of locked tokens on the L1 bridge contract
|
||||
- `l2TokenTotalSupply` - total number of minted L2 tokens
|
||||
- `l2TokenNotWithdrawn` - total number of burned L2 tokens which aren’t withdrawn from the L1 bridge
|
||||
|
||||
At any time following invariant must be satisfied: `l1ERC20TokenBridgeBalance == l2TokenTotalSupply + l2TokenNotWithdrawn`.
|
||||
|
||||
In the case of invariant violation, Lido will have a dispute period to suspend the L1 and L2 bridges. Disabled bridges forbid the minting of L2Token and withdrawal of minted tokens till the resolution of the issue.
|
||||
|
||||
### Attack to L1ScrollMessenger
|
||||
|
||||
According to the Scroll documentation, `L1ScrollMessenger`:
|
||||
|
||||
> The L1 Scroll Messenger contract sends messages from L1 to L2 and relays messages from L2 onto L1.
|
||||
|
||||
This contract is central to the L2-to-L1 communication process, since all messages from L2 that are verified by the zkevm proof are executed on behalf of this contract.
|
||||
|
||||
In case of a vulnerability in the `L1ScrollMessenger` which allows the attacker to send arbitrary messages bypassing the zkevm proof, an attacker can immediately drain tokens from the L1 bridge.
|
||||
|
||||
The upgradeability of the `L1ScrollMessenger` creates additional risk: there is a risk of an attack that replaces the implementation with one containing malicious functionality. Such an attack reduces to the vulnerability above and could steal all locked tokens on the L1 bridge.
|
||||
|
||||
To respond quickly to such an attack, Lido can set up monitoring of the Proxy contract, which will ring the alarm in case of an implementation upgrade.
|
||||
40
contracts/src/test/integration/Domain.t.sol
Normal file
40
contracts/src/test/integration/Domain.t.sol
Normal file
@@ -0,0 +1,40 @@
|
||||
// SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
pragma solidity >=0.8.0;
|
||||
|
||||
import {console2} from "forge-std/console2.sol";
|
||||
import {StdChains} from "forge-std/StdChains.sol";
|
||||
import {Vm} from "forge-std/Vm.sol";
|
||||
|
||||
// code from: https://github.com/marsfoundation/xchain-helpers/blob/master/src/testing/Domain.sol
|
||||
|
||||
/// @notice Thin wrapper around a single forked chain in forge tests: bundles
///         the chain metadata with the id of the fork created from its RPC URL.
///         Adapted from marsfoundation/xchain-helpers `Domain.sol`.
contract Domain {
    // solhint-disable-next-line const-name-snakecase
    Vm internal constant vm = Vm(address(uint160(uint256(keccak256("hevm cheat code")))));

    /// @dev Metadata (alias, chain id, RPC URL) of the wrapped chain.
    StdChains.Chain private _chainInfo;

    /// @notice Identifier of the fork created for this chain.
    uint256 public forkId;

    /// @param _chain Chain metadata; a fork is created from its RPC URL, and this
    ///        wrapper contract is made persistent so it survives fork switches.
    constructor(StdChains.Chain memory _chain) {
        forkId = vm.createFork(_chain.rpcUrl);
        _chainInfo = _chain;
        vm.makePersistent(address(this));
    }

    /// @notice Returns the chain metadata this domain wraps.
    function details() public view returns (StdChains.Chain memory) {
        return _chainInfo;
    }

    /// @notice Switches the active fork to this domain and sanity-checks that the
    ///         configured RPC endpoint actually serves the expected chain id.
    function selectFork() public {
        vm.selectFork(forkId);
        bool onExpectedChain = block.chainid == _chainInfo.chainId;
        require(
            onExpectedChain,
            string(
                abi.encodePacked(_chainInfo.chainAlias, " is pointing to the wrong RPC endpoint '", _chainInfo.rpcUrl, "'")
            )
        );
    }

    /// @notice Rolls this domain's fork to block `blocknum`.
    function rollFork(uint256 blocknum) public {
        vm.rollFork(forkId, blocknum);
    }
}
|
||||
129
contracts/src/test/integration/GatewayIntegrationBase.t.sol
Normal file
129
contracts/src/test/integration/GatewayIntegrationBase.t.sol
Normal file
@@ -0,0 +1,129 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {Test} from "forge-std/Test.sol";
|
||||
import {Vm} from "forge-std/Vm.sol";
|
||||
|
||||
import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
|
||||
import {ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
|
||||
|
||||
import {Domain} from "./Domain.t.sol";
|
||||
|
||||
import {IL2ScrollMessenger} from "../../L2/IL2ScrollMessenger.sol";
|
||||
import {AddressAliasHelper} from "../../libraries/common/AddressAliasHelper.sol";
|
||||
|
||||
/// @dev Base contract for mainnet <-> Scroll fork-based integration tests.
///      Maintains one fork per chain and replays cross-domain `SentMessage`
///      events between them, so bridge flows can be tested end-to-end without
///      generating real state roots or proofs.
abstract contract GatewayIntegrationBase is Test {
    /// @dev topic0 of `SentMessage(address,address,uint256,uint256,uint256,bytes)`,
    ///      used to filter recorded logs for cross-domain messages.
    bytes32 private constant SENT_MESSAGE_TOPIC =
        keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes)");

    // Hardcoded mainnet deployment addresses of the Scroll bridge contracts.
    address internal constant L1_SCROLL_MESSENGER = 0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367;

    address internal constant L1_SCROLL_CHAIN = 0xa13BAF47339d63B743e7Da8741db5456DAc1E556;

    address internal constant L1_MESSAGE_QUEUE = 0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B;

    address internal constant L1_GATEWAY_ROUTER = 0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6;

    // Hardcoded Scroll (L2) deployment addresses.
    address internal constant L2_SCROLL_MESSENGER = 0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC;

    address internal constant L2_MESSAGE_QUEUE = 0x5300000000000000000000000000000000000000;

    address internal constant L2_GATEWAY_ROUTER = 0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79;

    /// @dev Fork handle for Ethereum mainnet.
    Domain internal mainnet;

    /// @dev Fork handle for the Scroll chain.
    Domain internal scroll;

    /// @dev Index of the next recorded log to scan in `relayFromMainnet`;
    ///      repeated calls skip messages that were already relayed.
    uint256 internal lastFromMainnetLogIndex;

    /// @dev Index of the next recorded log to scan in `relayFromScroll`.
    uint256 internal lastFromScrollLogIndex;

    /// @dev Accept plain ETH transfers (e.g. refunds or withdrawn value) to the test contract.
    receive() external payable {}

    /// @dev Registers RPC endpoints for both chains and creates one Domain (fork) each.
    // solhint-disable-next-line func-name-mixedcase
    function __GatewayIntegrationBase_setUp() internal {
        setChain("scroll", ChainData("Scroll Chain", 534352, "https://rpc.scroll.io"));
        setChain("mainnet", ChainData("Mainnet", 1, "https://rpc.ankr.com/eth"));

        mainnet = new Domain(getChain("mainnet"));
        scroll = new Domain(getChain("scroll"));
    }

    /// @dev Replays all pending L1 -> L2 `SentMessage` events on the Scroll fork by
    ///      calling `relayMessage` from the aliased L1 messenger address.
    function relayFromMainnet() internal {
        scroll.selectFork();

        // On L2 the cross-domain call arrives from the L1 messenger's aliased address.
        address malias = AddressAliasHelper.applyL1ToL2Alias(L1_SCROLL_MESSENGER);

        // Read all L1 -> L2 messages and relay them under Scroll fork
        Vm.Log[] memory allLogs = vm.getRecordedLogs();
        for (; lastFromMainnetLogIndex < allLogs.length; lastFromMainnetLogIndex++) {
            Vm.Log memory _log = allLogs[lastFromMainnetLogIndex];
            if (_log.topics[0] == SENT_MESSAGE_TOPIC && _log.emitter == address(L1_SCROLL_MESSENGER)) {
                // topics[1]/topics[2] are the indexed sender/target addresses.
                address sender = address(uint160(uint256(_log.topics[1])));
                address target = address(uint160(uint256(_log.topics[2])));
                (uint256 value, uint256 nonce, uint256 gasLimit, bytes memory message) = abi.decode(
                    _log.data,
                    (uint256, uint256, uint256, bytes)
                );
                vm.prank(malias);
                IL2ScrollMessenger(L2_SCROLL_MESSENGER).relayMessage{gas: gasLimit}(
                    sender,
                    target,
                    value,
                    nonce,
                    message
                );
            }
        }
    }

    /// @dev Replays all pending L2 -> L1 `SentMessage` events on the mainnet fork by
    ///      calling the target directly while impersonating the L1 messenger.
    function relayFromScroll() internal {
        mainnet.selectFork();

        // Read all L2 -> L1 messages and relay them under Primary fork
        // Note: We bypass the L1 messenger relay here because it's easier to not have to generate valid state roots / merkle proofs
        Vm.Log[] memory allLogs = vm.getRecordedLogs();
        for (; lastFromScrollLogIndex < allLogs.length; lastFromScrollLogIndex++) {
            Vm.Log memory _log = allLogs[lastFromScrollLogIndex];
            if (_log.topics[0] == SENT_MESSAGE_TOPIC && _log.emitter == address(L2_SCROLL_MESSENGER)) {
                address sender = address(uint160(uint256(_log.topics[1])));
                address target = address(uint160(uint256(_log.topics[2])));
                (uint256 value, , , bytes memory message) = abi.decode(_log.data, (uint256, uint256, uint256, bytes));
                // Set xDomainMessageSender
                // NOTE(review): slot 201 is assumed to be the storage slot of
                // `xDomainMessageSender` in L1ScrollMessenger, and value 1 its
                // "unset" sentinel — confirm against the messenger's storage layout.
                vm.store(address(L1_SCROLL_MESSENGER), bytes32(uint256(201)), bytes32(uint256(uint160(sender))));
                vm.startPrank(address(L1_SCROLL_MESSENGER));
                (bool success, bytes memory response) = target.call{value: value}(message);
                vm.stopPrank();
                vm.store(address(L1_SCROLL_MESSENGER), bytes32(uint256(201)), bytes32(uint256(1)));
                if (!success) {
                    // Bubble up the target's revert data unchanged.
                    assembly {
                        revert(add(response, 32), mload(response))
                    }
                }
            }
        }
    }

    /// @dev Upgrades `proxy` to `implementation` through the chain's ProxyAdmin,
    ///      impersonating the ProxyAdmin owner.
    /// @param isMainnet True to upgrade on the mainnet fork, false for the Scroll fork.
    /// @param proxy The proxy contract to upgrade.
    /// @param implementation The address of the new implementation.
    function upgrade(
        bool isMainnet,
        address proxy,
        address implementation
    ) internal {
        address admin;
        address owner;
        if (isMainnet) {
            mainnet.selectFork();
            // Hardcoded mainnet ProxyAdmin and its owner.
            admin = 0xEB803eb3F501998126bf37bB823646Ed3D59d072;
            owner = 0x798576400F7D662961BA15C6b3F3d813447a26a6;
        } else {
            scroll.selectFork();
            // Hardcoded Scroll ProxyAdmin and its owner.
            admin = 0xA76acF000C890b0DD7AEEf57627d9899F955d026;
            owner = 0x13D24a7Ff6F5ec5ff0e9C40Fc3B8C9c01c65437B;
        }

        vm.startPrank(owner);
        ProxyAdmin(admin).upgrade(ITransparentUpgradeableProxy(proxy), implementation);
        vm.stopPrank();
    }
}
|
||||
122
contracts/src/test/integration/LidoGatewayIntegration.t.sol
Normal file
122
contracts/src/test/integration/LidoGatewayIntegration.t.sol
Normal file
@@ -0,0 +1,122 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
|
||||
|
||||
import {GatewayIntegrationBase} from "./GatewayIntegrationBase.t.sol";
|
||||
|
||||
import {IL1ERC20Gateway} from "../../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import {IL2ERC20Gateway} from "../../L2/gateways/IL2ERC20Gateway.sol";
|
||||
import {L1LidoGateway} from "../../lido/L1LidoGateway.sol";
|
||||
import {L2LidoGateway} from "../../lido/L2LidoGateway.sol";
|
||||
|
||||
/// @dev Minimal interface of Lido's wstETH token, covering only the functions
///      the integration test below needs.
interface IWstETH {
    /// @notice Wraps `_stETHAmount` of stETH into wstETH; returns the amount of wstETH received.
    function wrap(uint256 _stETHAmount) external returns (uint256);

    /// @notice Unwraps `_wstETHAmount` of wstETH back into stETH; returns the amount of stETH received.
    function unwrap(uint256 _wstETHAmount) external returns (uint256);

    /// @notice Returns the amount of stETH corresponding to `_wstETHAmount` of wstETH.
    function getStETHByWstETH(uint256 _wstETHAmount) external view returns (uint256);

    /// @notice Returns the amount of wstETH corresponding to `_stETHAmount` of stETH.
    function getWstETHByStETH(uint256 _stETHAmount) external view returns (uint256);

    /// @notice Returns the amount of stETH per single wstETH token.
    function stEthPerToken() external view returns (uint256);

    /// @notice Returns the amount of wstETH per single stETH token.
    function tokensPerStEth() external view returns (uint256);
}
|
||||
|
||||
/// @dev End-to-end fork test for the Lido wstETH gateways: upgrades the live
///      L1/L2 Lido gateway proxies to freshly built implementations, then
///      round-trips wstETH across the bridge both directly and via the routers.
contract LidoGatewayIntegrationTest is GatewayIntegrationBase {
    // Live deployment addresses on mainnet / Scroll.
    address private constant L1_LIDO_GATEWAY = 0x6625C6332c9F91F2D27c304E729B86db87A3f504;

    address private constant L1_STETH = 0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84;

    address private constant L1_WSTETH = 0x7f39C581F595B53c5cb19bD0b3f8dA6c935E2Ca0;

    address private constant L2_LIDO_GATEWAY = 0x8aE8f22226B9d789A36AC81474e633f8bE2856c9;

    address private constant L2_WSTETH = 0xf610A9dfB7C89644979b4A0f27063E9e7d7Cda32;

    /// @dev Creates both forks, upgrades each Lido gateway proxy to a fresh
    ///      implementation, and runs its V2 initializer (all role addresses zero).
    function setUp() public {
        __GatewayIntegrationBase_setUp();

        mainnet.selectFork();
        upgrade(
            true,
            L1_LIDO_GATEWAY,
            address(new L1LidoGateway(L1_WSTETH, L2_WSTETH, L2_LIDO_GATEWAY, L1_GATEWAY_ROUTER, L1_SCROLL_MESSENGER))
        );
        L1LidoGateway(L1_LIDO_GATEWAY).initializeV2(address(0), address(0), address(0), address(0));

        scroll.selectFork();
        upgrade(
            false,
            L2_LIDO_GATEWAY,
            address(new L2LidoGateway(L1_WSTETH, L2_WSTETH, L1_LIDO_GATEWAY, L2_GATEWAY_ROUTER, L2_SCROLL_MESSENGER))
        );
        L2LidoGateway(L2_LIDO_GATEWAY).initializeV2(address(0), address(0), address(0), address(0));
    }

    /// @notice Round-trips 1 wstETH calling the Lido gateways directly.
    function testWithoutRouter() public {
        depositAndWithdraw(false);
    }

    /// @notice Round-trips 1 wstETH going through the gateway routers.
    function testWithRouter() public {
        depositAndWithdraw(true);
    }

    /// @dev Deposits 1 wstETH from mainnet to Scroll, relays the message,
    ///      withdraws it back, relays again, and checks balances at every hop.
    /// @param useRouter Whether to route calls through the gateway routers
    ///        instead of hitting the Lido gateways directly.
    function depositAndWithdraw(bool useRouter) private {
        vm.recordLogs();

        mainnet.selectFork();
        uint256 rate = IWstETH(L1_WSTETH).stEthPerToken();

        // deposit to get some stETH
        // (sending plain ETH to stETH mints stETH; scaling by `rate` makes the
        // wrap below yield roughly 10 wstETH)
        (bool succeed, ) = L1_STETH.call{value: 11 * rate}("");
        assertEq(true, succeed);
        // stETH balances are share-based, so allow a few wei of rounding error.
        assertApproxEqAbs(MockERC20(L1_STETH).balanceOf(address(this)), 11 * rate, 10);

        // wrap stETH to wstETH
        MockERC20(L1_STETH).approve(L1_WSTETH, 10 * rate);
        IWstETH(L1_WSTETH).wrap(10 * rate);
        assertApproxEqAbs(MockERC20(L1_WSTETH).balanceOf(address(this)), 10 ether, 10);

        // deposit 1 wstETH
        // NOTE(review): the 1 ether of call value presumably pays the L1 -> L2
        // messaging fee — confirm against the gateway's fee handling.
        uint256 l1GatewayBalance = MockERC20(L1_WSTETH).balanceOf(L1_LIDO_GATEWAY);
        uint256 l1Balance = MockERC20(L1_WSTETH).balanceOf(address(this));
        if (useRouter) {
            MockERC20(L1_WSTETH).approve(L1_GATEWAY_ROUTER, 1 ether);
            IL1ERC20Gateway(L1_GATEWAY_ROUTER).depositERC20{value: 1 ether}(L1_WSTETH, 1 ether, 400000);
        } else {
            MockERC20(L1_WSTETH).approve(L1_LIDO_GATEWAY, 1 ether);
            IL1ERC20Gateway(L1_LIDO_GATEWAY).depositERC20{value: 1 ether}(L1_WSTETH, 1 ether, 400000);
        }
        // The deposited amount moves from the caller to the L1 gateway (locked).
        assertEq(l1Balance - 1 ether, MockERC20(L1_WSTETH).balanceOf(address(this)));
        assertEq(l1GatewayBalance + 1 ether, MockERC20(L1_WSTETH).balanceOf(L1_LIDO_GATEWAY));

        // relay message to Scroll and check balance
        scroll.selectFork();
        uint256 l2Balance = MockERC20(L2_WSTETH).balanceOf(address(this));
        relayFromMainnet();

        // withdraw wstETH
        scroll.selectFork();
        // Deposit minted 1 wstETH on L2 to the caller; the L2 gateway holds none.
        assertEq(l2Balance + 1 ether, MockERC20(L2_WSTETH).balanceOf(address(this)));
        assertEq(0, MockERC20(L2_WSTETH).balanceOf(L2_LIDO_GATEWAY));
        if (useRouter) {
            IL2ERC20Gateway(L2_GATEWAY_ROUTER).withdrawERC20(L2_WSTETH, 1 ether, 0);
        } else {
            IL2ERC20Gateway(L2_LIDO_GATEWAY).withdrawERC20(L2_WSTETH, 1 ether, 0);
        }
        // Withdrawal burns the L2 balance immediately.
        assertEq(l2Balance, MockERC20(L2_WSTETH).balanceOf(address(this)));
        assertEq(0, MockERC20(L2_WSTETH).balanceOf(L2_LIDO_GATEWAY));

        // relay message to Mainnet and check balance
        mainnet.selectFork();
        l1GatewayBalance = MockERC20(L1_WSTETH).balanceOf(L1_LIDO_GATEWAY);
        l1Balance = MockERC20(L1_WSTETH).balanceOf(address(this));
        relayFromScroll();
        mainnet.selectFork();
        // The locked amount returns from the L1 gateway to the caller.
        assertEq(l1Balance + 1 ether, MockERC20(L1_WSTETH).balanceOf(address(this)));
        assertEq(l1GatewayBalance - 1 ether, MockERC20(L1_WSTETH).balanceOf(L1_LIDO_GATEWAY));
    }
}
|
||||
705
contracts/src/test/lido/L1LidoGateway.t.sol
Normal file
705
contracts/src/test/lido/L1LidoGateway.t.sol
Normal file
@@ -0,0 +1,705 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
|
||||
|
||||
import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol";
|
||||
import {ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
|
||||
|
||||
import {IL1ERC20Gateway} from "../../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import {L1GatewayRouter} from "../../L1/gateways/L1GatewayRouter.sol";
|
||||
import {IL1ScrollMessenger} from "../../L1/IL1ScrollMessenger.sol";
|
||||
import {IL2ERC20Gateway} from "../../L2/gateways/IL2ERC20Gateway.sol";
|
||||
import {AddressAliasHelper} from "../../libraries/common/AddressAliasHelper.sol";
|
||||
import {ScrollConstants} from "../../libraries/constants/ScrollConstants.sol";
|
||||
import {L2LidoGateway} from "../../lido/L2LidoGateway.sol";
|
||||
|
||||
import {L1GatewayTestBase} from "../L1GatewayTestBase.t.sol";
|
||||
import {MockL1LidoGateway} from "../mocks/MockL1LidoGateway.sol";
|
||||
import {MockScrollMessenger} from "../mocks/MockScrollMessenger.sol";
|
||||
import {MockGatewayRecipient} from "../mocks/MockGatewayRecipient.sol";
|
||||
|
||||
/// @notice Unit tests for L1LidoGateway, driven through a MockL1LidoGateway
///         (which additionally exposes `reentrantCall` for reentrancy checks).
///         Messenger/rollup fixtures come from L1GatewayTestBase.
contract L1LidoGatewayTest is L1GatewayTestBase {
    // Events re-declared from L1LidoGateway so that `emit` can be paired with
    // `hevm.expectEmit` inside this test contract.
    event FinalizeWithdrawERC20(
        address indexed _l1Token,
        address indexed _l2Token,
        address indexed _from,
        address _to,
        uint256 _amount,
        bytes _data
    );
    event DepositERC20(
        address indexed _l1Token,
        address indexed _l2Token,
        address indexed _from,
        address _to,
        uint256 _amount,
        bytes _data
    );
    // Emitted when a failed deposit is dropped and the tokens are refunded.
    event RefundERC20(address indexed token, address indexed recipient, uint256 amount);
    event DepositsEnabled(address indexed enabler);
    event DepositsDisabled(address indexed disabler);
    event WithdrawalsEnabled(address indexed enabler);
    event WithdrawalsDisabled(address indexed disabler);
    event RoleGranted(bytes32 indexed role, address indexed account, address indexed sender);
    event RoleRevoked(bytes32 indexed role, address indexed account, address indexed sender);

    // Custom errors re-declared from L1LidoGateway; used as
    // `hevm.expectRevert(<Error>.selector)` targets.
    error ErrorDepositsEnabled();
    error ErrorDepositsDisabled();
    error ErrorWithdrawalsEnabled();
    error ErrorWithdrawalsDisabled();
    error ErrorCallerIsNotDepositsEnabler();
    error ErrorCallerIsNotDepositsDisabler();
    error ErrorCallerIsNotWithdrawalsEnabler();
    error ErrorCallerIsNotWithdrawalsDisabler();
    error ErrorUnsupportedL1Token();
    error ErrorUnsupportedL2Token();
    error ErrorAccountIsZeroAddress();
    error ErrorNonZeroMsgValue();
    error ErrorDepositZeroAmount();
    error DepositAndCallIsNotAllowed();

    // Gateway under test (proxied, see _deployGateway) and the L1 router in
    // front of it.
    MockL1LidoGateway private gateway;
    L1GatewayRouter private router;

    // L2 counterpart; deployed locally only so its address can be used as the
    // cross-domain counterpart — it is never executed in these tests.
    L2LidoGateway private counterpartGateway;

    // Mock wstETH token pair bridged by the gateway.
    MockERC20 private l1Token;
    MockERC20 private l2Token;
|
||||
|
||||
/// @dev Deploys the token pair, a local L2 counterpart, the proxied router
///      and gateway, initializes them, and funds/approves this contract so
///      the deposit fuzz tests can move tokens freely.
function setUp() public {
    __L1GatewayTestBase_setUp();

    // Deploy tokens
    l1Token = new MockERC20("Mock L1", "ML1", 18);
    l2Token = new MockERC20("Mock L2", "ML2", 18);

    // Deploy L2 contracts (address(1) placeholders: never called here)
    counterpartGateway = new L2LidoGateway(address(l1Token), address(l2Token), address(1), address(1), address(1));

    // Deploy L1 contracts
    router = L1GatewayRouter(_deployProxy(address(new L1GatewayRouter(address(l1Messenger)))));
    gateway = _deployGateway(address(l1Messenger));

    // Initialize L1 contracts; initializeV2 takes the four role holders
    // (enabler/disabler pairs), zeroed out here and granted per-test instead.
    gateway.initialize(address(counterpartGateway), address(router), address(l1Messenger));
    gateway.initializeV2(address(0), address(0), address(0), address(0));
    router.initialize(address(0), address(gateway));

    // Prepare token balances
    l1Token.mint(address(this), type(uint128).max);
    l1Token.approve(address(gateway), type(uint256).max);
    l1Token.approve(address(router), type(uint256).max);
}
|
||||
|
||||
/// @dev Checks every piece of state set by initialize/initializeV2 and that
///      both initializers are single-shot.
function testInitialized() public {
    // state in ScrollGatewayBase
    assertEq(address(this), gateway.owner());
    assertEq(address(counterpartGateway), gateway.counterpart());
    assertEq(address(router), gateway.router());
    assertEq(address(l1Messenger), gateway.messenger());

    // state in LidoBridgeableTokens
    assertEq(address(l1Token), gateway.l1Token());
    assertEq(address(l2Token), gateway.l2Token());

    // state in LidoGatewayManager
    assertBoolEq(true, gateway.isDepositsEnabled());
    assertBoolEq(true, gateway.isWithdrawalsEnabled());

    // state in L1LidoGateway
    assertEq(address(l2Token), gateway.getL2ERC20Address(address(l1Token)));

    // both initializers must revert on a second call
    hevm.expectRevert("Initializable: contract is already initialized");
    gateway.initialize(address(counterpartGateway), address(router), address(l1Messenger));

    hevm.expectRevert("Initializable: contract is already initialized");
    gateway.initializeV2(address(0), address(0), address(0), address(0));

    // clean up the roles initializeV2 assigned to address(0) in setUp
    // NOTE(review): presumably initializeV2 grants these roles to its four
    // arguments — confirm against L1LidoGateway.
    gateway.revokeRole(gateway.DEPOSITS_ENABLER_ROLE(), address(0));
    gateway.revokeRole(gateway.DEPOSITS_DISABLER_ROLE(), address(0));
    gateway.revokeRole(gateway.WITHDRAWALS_ENABLER_ROLE(), address(0));
    gateway.revokeRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(0));
}
|
||||
|
||||
/*************************************
|
||||
* Functions from LidoGatewayManager *
|
||||
*************************************/
|
||||
|
||||
/// @dev enableDeposits: reverts when already enabled, reverts for callers
///      without DEPOSITS_ENABLER_ROLE, and emits DepositsEnabled on success.
function testEnableDeposits() external {
    // revert when already enabled
    hevm.expectRevert(ErrorDepositsEnabled.selector);
    gateway.enableDeposits();

    // revert when caller is not deposits enabler
    gateway.grantRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
    gateway.disableDeposits();
    hevm.expectRevert(ErrorCallerIsNotDepositsEnabler.selector);
    gateway.enableDeposits();

    // succeed
    gateway.grantRole(gateway.DEPOSITS_ENABLER_ROLE(), address(this));
    assertBoolEq(false, gateway.isDepositsEnabled());
    hevm.expectEmit(true, false, false, true);
    emit DepositsEnabled(address(this));
    gateway.enableDeposits();
    assertBoolEq(true, gateway.isDepositsEnabled());
}
|
||||
|
||||
/// @dev disableDeposits: reverts when already disabled, reverts for callers
///      without DEPOSITS_DISABLER_ROLE, and emits DepositsDisabled on success.
function testDisableDeposits() external {
    // revert when already disabled
    gateway.grantRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
    gateway.disableDeposits();
    assertBoolEq(false, gateway.isDepositsEnabled());
    hevm.expectRevert(ErrorDepositsDisabled.selector);
    gateway.disableDeposits();

    // revert when caller is not deposits disabler
    gateway.grantRole(gateway.DEPOSITS_ENABLER_ROLE(), address(this));
    gateway.enableDeposits();
    assertBoolEq(true, gateway.isDepositsEnabled());
    gateway.revokeRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
    hevm.expectRevert(ErrorCallerIsNotDepositsDisabler.selector);
    gateway.disableDeposits();

    // succeed
    gateway.grantRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
    assertBoolEq(true, gateway.isDepositsEnabled());
    hevm.expectEmit(true, false, false, true);
    emit DepositsDisabled(address(this));
    gateway.disableDeposits();
    assertBoolEq(false, gateway.isDepositsEnabled());
}
|
||||
|
||||
/// @dev enableWithdrawals: reverts when already enabled, reverts for callers
///      without WITHDRAWALS_ENABLER_ROLE, and emits WithdrawalsEnabled.
function testEnableWithdrawals() external {
    // revert when already enabled
    hevm.expectRevert(ErrorWithdrawalsEnabled.selector);
    gateway.enableWithdrawals();

    // revert when caller is not withdrawals enabler
    gateway.grantRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
    gateway.disableWithdrawals();
    hevm.expectRevert(ErrorCallerIsNotWithdrawalsEnabler.selector);
    gateway.enableWithdrawals();

    // succeed
    gateway.grantRole(gateway.WITHDRAWALS_ENABLER_ROLE(), address(this));
    assertBoolEq(false, gateway.isWithdrawalsEnabled());
    hevm.expectEmit(true, false, false, true);
    emit WithdrawalsEnabled(address(this));
    gateway.enableWithdrawals();
    assertBoolEq(true, gateway.isWithdrawalsEnabled());
}
|
||||
|
||||
/// @dev disableWithdrawals: reverts when already disabled, reverts for callers
///      without WITHDRAWALS_DISABLER_ROLE, and emits WithdrawalsDisabled.
function testDisableWithdrawals() external {
    // revert when already disabled
    gateway.grantRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
    gateway.disableWithdrawals();
    assertBoolEq(false, gateway.isWithdrawalsEnabled());
    hevm.expectRevert(ErrorWithdrawalsDisabled.selector);
    gateway.disableWithdrawals();

    // revert when caller is not withdrawals disabler
    gateway.grantRole(gateway.WITHDRAWALS_ENABLER_ROLE(), address(this));
    gateway.enableWithdrawals();
    assertBoolEq(true, gateway.isWithdrawalsEnabled());
    gateway.revokeRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
    hevm.expectRevert(ErrorCallerIsNotWithdrawalsDisabler.selector);
    gateway.disableWithdrawals();

    // succeed
    gateway.grantRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
    assertBoolEq(true, gateway.isWithdrawalsEnabled());
    hevm.expectEmit(true, false, false, true);
    emit WithdrawalsDisabled(address(this));
    gateway.disableWithdrawals();
    assertBoolEq(false, gateway.isWithdrawalsEnabled());
}
|
||||
|
||||
/// @dev Fuzz grantRole: only the owner may grant, granting emits RoleGranted
///      and updates enumeration, and re-granting is a no-op.
function testGrantRole(bytes32 _role, address _account) external {
    // revert not owner
    hevm.startPrank(address(1));
    hevm.expectRevert("Ownable: caller is not the owner");
    gateway.grantRole(_role, _account);
    hevm.stopPrank();

    // succeed
    assertBoolEq(gateway.hasRole(_role, _account), false);
    hevm.expectEmit(true, true, true, true);
    emit RoleGranted(_role, _account, address(this));
    gateway.grantRole(_role, _account);
    assertBoolEq(gateway.hasRole(_role, _account), true);
    assertEq(gateway.getRoleMemberCount(_role), 1);
    assertEq(gateway.getRoleMember(_role, 0), _account);

    // re-granting changes nothing
    gateway.grantRole(_role, _account);
    assertBoolEq(gateway.hasRole(_role, _account), true);
    assertEq(gateway.getRoleMemberCount(_role), 1);
    assertEq(gateway.getRoleMember(_role, 0), _account);
}
|
||||
|
||||
/// @dev Fuzz revokeRole: only the owner may revoke, revoking emits RoleRevoked
///      and updates enumeration, and revoking again is a no-op.
function testRevokeRole(bytes32 _role, address _account) external {
    // revert not owner
    hevm.startPrank(address(1));
    hevm.expectRevert("Ownable: caller is not the owner");
    gateway.revokeRole(_role, _account);
    hevm.stopPrank();

    // grant first
    gateway.grantRole(_role, _account);
    assertBoolEq(gateway.hasRole(_role, _account), true);
    assertEq(gateway.getRoleMemberCount(_role), 1);
    assertEq(gateway.getRoleMember(_role, 0), _account);

    // revoke
    hevm.expectEmit(true, true, true, true);
    emit RoleRevoked(_role, _account, address(this));
    gateway.revokeRole(_role, _account);
    assertBoolEq(gateway.hasRole(_role, _account), false);
    assertEq(gateway.getRoleMemberCount(_role), 0);

    // revoke again: no-op
    gateway.revokeRole(_role, _account);
    assertBoolEq(gateway.hasRole(_role, _account), false);
    assertEq(gateway.getRoleMemberCount(_role), 0);
}
|
||||
|
||||
/********************************
|
||||
* Functions from L1LidoGateway *
|
||||
********************************/
|
||||
|
||||
/// @dev Fuzz: deposit via the gateway directly, to self, no calldata
///      (methodType 0 in _depositERC20).
function testDepositERC20(
    uint256 amount,
    uint256 gasLimit,
    uint256 feePerGas
) external {
    _depositERC20(false, 0, amount, address(this), new bytes(0), gasLimit, feePerGas);
}

/// @dev Fuzz: deposit via the gateway directly, to an explicit recipient
///      (methodType 1).
function testDepositERC20WithRecipient(
    uint256 amount,
    address recipient,
    uint256 gasLimit,
    uint256 feePerGas
) external {
    _depositERC20(false, 1, amount, recipient, new bytes(0), gasLimit, feePerGas);
}

/// @dev Fuzz: deposit via the gateway directly, with recipient and calldata
///      (methodType 2).
function testDepositERC20WithRecipientAndCalldata(
    uint256 amount,
    address recipient,
    bytes memory dataToCall,
    uint256 gasLimit,
    uint256 feePerGas
) external {
    _depositERC20(false, 2, amount, recipient, dataToCall, gasLimit, feePerGas);
}

/// @dev Fuzz: deposit through the router, to self, no calldata.
function testDepositERC20ByRouter(
    uint256 amount,
    uint256 gasLimit,
    uint256 feePerGas
) external {
    _depositERC20(true, 0, amount, address(this), new bytes(0), gasLimit, feePerGas);
}

/// @dev Fuzz: deposit through the router, to an explicit recipient.
function testDepositERC20WithRecipientByRouter(
    uint256 amount,
    address recipient,
    uint256 gasLimit,
    uint256 feePerGas
) external {
    _depositERC20(true, 1, amount, recipient, new bytes(0), gasLimit, feePerGas);
}

/// @dev Fuzz: deposit through the router, with recipient and calldata.
function testDepositERC20WithRecipientAndCalldataByRouter(
    uint256 amount,
    address recipient,
    bytes memory dataToCall,
    uint256 gasLimit,
    uint256 feePerGas
) external {
    _depositERC20(true, 2, amount, recipient, dataToCall, gasLimit, feePerGas);
}
|
||||
|
||||
/// @dev onDropMessage: exercises every revert path on a mock-messenger
///      gateway, then drops a real queued deposit through l1Messenger and
///      checks the tokens are refunded (RefundERC20).
function testDropMessage(uint256 amount, address recipient) public {
    hevm.assume(recipient != address(0));

    amount = bound(amount, 1, l1Token.balanceOf(address(this)));
    // the finalizeDepositERC20 payload that the queued deposit would carry
    bytes memory message = abi.encodeCall(
        IL2ERC20Gateway.finalizeDepositERC20,
        (address(l1Token), address(l2Token), address(this), recipient, amount, new bytes(0))
    );
    // queue a real deposit so there is a message to drop later
    gateway.depositERC20AndCall(address(l1Token), recipient, amount, new bytes(0), defaultGasLimit);

    // a second gateway wired to a mock messenger, used only for revert paths
    MockScrollMessenger mockMessenger = new MockScrollMessenger();
    MockL1LidoGateway mockGateway = _deployGateway(address(mockMessenger));
    mockGateway.initialize(address(counterpartGateway), address(router), address(mockMessenger));
    mockGateway.initializeV2(address(0), address(0), address(0), address(0));

    // revert: caller is not the messenger
    hevm.expectRevert(ErrorCallerIsNotMessenger.selector);
    mockGateway.onDropMessage(new bytes(0));

    // revert: messenger is not in drop-message context
    hevm.expectRevert(ErrorNotInDropMessageContext.selector);
    mockMessenger.callTarget(address(mockGateway), abi.encodeCall(mockGateway.onDropMessage, (new bytes(0))));

    // revert: reentrancy through the gateway back into onDropMessage
    mockMessenger.setXDomainMessageSender(ScrollConstants.DROP_XDOMAIN_MESSAGE_SENDER);
    hevm.expectRevert("ReentrancyGuard: reentrant call");
    mockGateway.reentrantCall(
        address(mockMessenger),
        abi.encodeCall(
            mockMessenger.callTarget,
            (address(mockGateway), abi.encodeCall(mockGateway.onDropMessage, (message)))
        )
    );

    // revert: payload selector is not finalizeDepositERC20
    hevm.expectRevert("invalid selector");
    mockMessenger.callTarget(address(mockGateway), abi.encodeCall(mockGateway.onDropMessage, (new bytes(4))));

    // revert: payload carries an unsupported l1 token
    hevm.expectRevert(ErrorUnsupportedL1Token.selector);
    mockMessenger.callTarget(
        address(mockGateway),
        abi.encodeCall(
            mockGateway.onDropMessage,
            (
                abi.encodeCall(
                    IL2ERC20Gateway.finalizeDepositERC20,
                    (address(l2Token), address(l2Token), address(this), recipient, amount, new bytes(0))
                )
            )
        )
    );

    // revert: nonzero msg.value on an ERC20 drop
    hevm.expectRevert(ErrorNonZeroMsgValue.selector);
    mockMessenger.callTarget{value: 1}(
        address(mockGateway),
        abi.encodeWithSelector(mockGateway.onDropMessage.selector, message)
    );

    // succeed on drop:
    // pop the queued message so it becomes droppable (skip bitmap 0x1)
    hevm.startPrank(address(rollup));
    messageQueue.popCrossDomainMessage(0, 1, 0x1);
    assertEq(messageQueue.pendingQueueIndex(), 1);
    hevm.stopPrank();

    // dropping must refund the escrowed tokens to the original depositor
    hevm.expectEmit(true, true, false, true);
    emit RefundERC20(address(l1Token), address(this), amount);

    uint256 balance = l1Token.balanceOf(address(this));
    uint256 gatewayBalance = l1Token.balanceOf(address(gateway));
    l1Messenger.dropMessage(address(gateway), address(counterpartGateway), 0, 0, message);
    assertEq(gatewayBalance - amount, l1Token.balanceOf(address(gateway)));
    assertEq(balance + amount, l1Token.balanceOf(address(this)));
}
|
||||
|
||||
/// @dev finalizeWithdrawERC20: exercises every revert path on a
///      mock-messenger gateway, then relays a real withdrawal with proof and
///      checks the escrowed tokens are released to the recipient.
function testFinalizeWithdrawERC20(
    address sender,
    uint256 amount,
    bytes memory dataToCall
) external {
    amount = bound(amount, 1, l1Token.balanceOf(address(this)));
    MockGatewayRecipient recipient = new MockGatewayRecipient();
    bytes memory message = abi.encodeCall(
        IL1ERC20Gateway.finalizeWithdrawERC20,
        (address(l1Token), address(l2Token), sender, address(recipient), amount, dataToCall)
    );
    gateway.depositERC20(address(l1Token), amount, defaultGasLimit); // deposit some token to L1LidoGateway

    // a second gateway wired to a mock messenger, used only for revert paths
    MockScrollMessenger mockMessenger = new MockScrollMessenger();
    MockL1LidoGateway mockGateway = _deployGateway(address(mockMessenger));
    mockGateway.initialize(address(counterpartGateway), address(router), address(mockMessenger));
    mockGateway.initializeV2(address(0), address(0), address(0), address(0));

    // revert: caller is not the messenger
    hevm.expectRevert(ErrorCallerIsNotMessenger.selector);
    mockGateway.finalizeWithdrawERC20(
        address(l1Token),
        address(l2Token),
        sender,
        address(recipient),
        amount,
        dataToCall
    );

    // revert: xDomain sender is not the counterpart gateway
    hevm.expectRevert(ErrorCallerIsNotCounterpartGateway.selector);
    mockMessenger.callTarget(address(mockGateway), message);

    // revert: reentrancy through the gateway back into finalizeWithdrawERC20
    mockMessenger.setXDomainMessageSender(address(counterpartGateway));
    hevm.expectRevert("ReentrancyGuard: reentrant call");
    mockGateway.reentrantCall(
        address(mockMessenger),
        abi.encodeCall(mockMessenger.callTarget, (address(mockGateway), message))
    );

    // revert: unsupported l1 token in the payload
    hevm.expectRevert(ErrorUnsupportedL1Token.selector);
    mockMessenger.callTarget(
        address(mockGateway),
        abi.encodeCall(
            IL1ERC20Gateway.finalizeWithdrawERC20,
            (address(l2Token), address(l2Token), sender, address(recipient), amount, dataToCall)
        )
    );

    // revert: unsupported l2 token in the payload
    hevm.expectRevert(ErrorUnsupportedL2Token.selector);
    mockMessenger.callTarget(
        address(mockGateway),
        abi.encodeCall(
            IL1ERC20Gateway.finalizeWithdrawERC20,
            (address(l1Token), address(l1Token), sender, address(recipient), amount, dataToCall)
        )
    );

    // revert: withdrawals disabled
    mockGateway.grantRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
    mockGateway.disableWithdrawals();
    hevm.expectRevert(ErrorWithdrawalsDisabled.selector);
    mockMessenger.callTarget(address(mockGateway), message);

    // revert: nonzero msg.value on an ERC20 withdrawal
    mockGateway.grantRole(gateway.WITHDRAWALS_ENABLER_ROLE(), address(this));
    mockGateway.enableWithdrawals();
    hevm.expectRevert(ErrorNonZeroMsgValue.selector);
    mockMessenger.callTarget{value: 1}(address(mockGateway), message);

    // succeed when finalizing via the real l1Messenger with a message proof
    bytes memory xDomainCalldata = abi.encodeCall(
        l2Messenger.relayMessage,
        (address(counterpartGateway), address(gateway), 0, 0, message)
    );
    prepareL2MessageRoot(keccak256(xDomainCalldata));
    IL1ScrollMessenger.L2MessageProof memory proof;
    proof.batchIndex = rollup.lastFinalizedBatchIndex();

    // should emit FinalizeWithdrawERC20 from L1LidoGateway
    hevm.expectEmit(true, true, true, true);
    emit FinalizeWithdrawERC20(address(l1Token), address(l2Token), sender, address(recipient), amount, dataToCall);

    // should emit RelayedMessage from L1ScrollMessenger
    hevm.expectEmit(true, false, false, true);
    emit RelayedMessage(keccak256(xDomainCalldata));

    uint256 gatewayBalance = l1Token.balanceOf(address(gateway));
    uint256 recipientBalance = l1Token.balanceOf(address(recipient));
    assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
    l1Messenger.relayMessageWithProof(address(counterpartGateway), address(gateway), 0, 0, message, proof);
    assertBoolEq(true, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
    assertEq(recipientBalance + amount, l1Token.balanceOf(address(recipient)));
    assertEq(gatewayBalance - amount, l1Token.balanceOf(address(gateway)));
}
|
||||
|
||||
/// @dev Shared body of all deposit fuzz tests. Walks every revert path
///      (reentrancy, unsupported token, zero recipient, deposits disabled,
///      zero amount, non-empty calldata) and then performs a successful
///      deposit, asserting the emitted events and all balance movements.
/// @param useRouter  Call through the router instead of the gateway.
/// @param methodType 0 = depositERC20(token,amount,gas),
///                   1 = depositERC20(token,to,amount,gas),
///                   2 = depositERC20AndCall.
function _depositERC20(
    bool useRouter,
    uint256 methodType,
    uint256 amount,
    address recipient,
    bytes memory dataToCall,
    uint256 gasLimit,
    uint256 feePerGas
) private {
    hevm.assume(recipient != address(0));
    amount = bound(amount, 1, l1Token.balanceOf(address(this)));
    gasLimit = bound(gasLimit, defaultGasLimit / 2, defaultGasLimit);
    feePerGas = bound(feePerGas, 0, 1000);
    messageQueue.setL2BaseFee(feePerGas);
    // from here on feePerGas holds the total cross-domain fee
    feePerGas = feePerGas * gasLimit;

    // revert when reentrant (abi.encode calls below are local; expectRevert
    // applies to the reentrantCall)
    hevm.expectRevert("ReentrancyGuard: reentrant call");
    {
        bytes memory reentrantData;
        if (methodType == 0) {
            reentrantData = abi.encodeWithSignature(
                "depositERC20(address,uint256,uint256)",
                address(l1Token),
                amount,
                gasLimit
            );
        } else if (methodType == 1) {
            reentrantData = abi.encodeWithSignature(
                "depositERC20(address,address,uint256,uint256)",
                address(l1Token),
                recipient,
                amount,
                gasLimit
            );
        } else if (methodType == 2) {
            reentrantData = abi.encodeCall(
                IL1ERC20Gateway.depositERC20AndCall,
                (address(l1Token), recipient, amount, dataToCall, gasLimit)
            );
        }
        gateway.reentrantCall(useRouter ? address(router) : address(gateway), reentrantData);
    }

    // revert when l1 token not supported (pass the l2 token instead)
    hevm.expectRevert(ErrorUnsupportedL1Token.selector);
    _invokeDepositERC20Call(
        useRouter,
        methodType,
        address(l2Token),
        amount,
        recipient,
        dataToCall,
        gasLimit,
        feePerGas
    );

    // revert when `to` is the zero address (only variants with a recipient)
    if (methodType != 0) {
        hevm.expectRevert(ErrorAccountIsZeroAddress.selector);
        _invokeDepositERC20Call(
            useRouter,
            methodType,
            address(l1Token),
            amount,
            address(0),
            dataToCall,
            gasLimit,
            feePerGas
        );
    }

    // revert when deposits disabled
    gateway.grantRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
    gateway.disableDeposits();
    hevm.expectRevert(ErrorDepositsDisabled.selector);
    _invokeDepositERC20Call(
        useRouter,
        methodType,
        address(l1Token),
        amount,
        recipient,
        dataToCall,
        gasLimit,
        feePerGas
    );

    // revert when depositing a zero amount
    gateway.grantRole(gateway.DEPOSITS_ENABLER_ROLE(), address(this));
    gateway.enableDeposits();
    hevm.expectRevert(ErrorDepositZeroAmount.selector);
    _invokeDepositERC20Call(useRouter, methodType, address(l1Token), 0, recipient, dataToCall, gasLimit, feePerGas);

    // revert when calldata is not empty (deposit-and-call is disallowed);
    // nothing left to test for that fuzz input, so return
    if (dataToCall.length != 0) {
        hevm.expectRevert(DepositAndCallIsNotAllowed.selector);
        _invokeDepositERC20Call(
            useRouter,
            methodType,
            address(l1Token),
            amount,
            recipient,
            dataToCall,
            gasLimit,
            feePerGas
        );
        return;
    }

    // succeed to deposit
    bytes memory message = abi.encodeCall(
        IL2ERC20Gateway.finalizeDepositERC20,
        (address(l1Token), address(l2Token), address(this), recipient, amount, dataToCall)
    );
    bytes memory xDomainCalldata = abi.encodeCall(
        l2Messenger.relayMessage,
        (address(gateway), address(counterpartGateway), 0, 0, message)
    );
    // should emit QueueTransaction from L1MessageQueue
    hevm.expectEmit(true, true, false, true);
    address sender = AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger));
    emit QueueTransaction(sender, address(l2Messenger), 0, 0, gasLimit, xDomainCalldata);

    // should emit SentMessage from L1ScrollMessenger
    hevm.expectEmit(true, true, false, true);
    emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, gasLimit, message);

    // should emit DepositERC20 from L1LidoGateway
    hevm.expectEmit(true, true, true, true);
    emit DepositERC20(address(l1Token), address(l2Token), address(this), recipient, amount, dataToCall);

    uint256 gatewayBalance = l1Token.balanceOf(address(gateway));
    uint256 feeVaultBalance = address(feeVault).balance;
    uint256 thisBalance = l1Token.balanceOf(address(this));
    assertEq(l1Messenger.messageSendTimestamp(keccak256(xDomainCalldata)), 0);
    uint256 balance = address(this).balance;
    _invokeDepositERC20Call(
        useRouter,
        methodType,
        address(l1Token),
        amount,
        recipient,
        dataToCall,
        gasLimit,
        feePerGas
    );
    assertEq(balance - feePerGas, address(this).balance); // extra value is transferred back
    assertGt(l1Messenger.messageSendTimestamp(keccak256(xDomainCalldata)), 0);
    assertEq(thisBalance - amount, l1Token.balanceOf(address(this)));
    assertEq(feeVaultBalance + feePerGas, address(feeVault).balance);
    assertEq(gatewayBalance + amount, l1Token.balanceOf(address(gateway)));
}
|
||||
|
||||
/// @dev Dispatches one of the three deposit entrypoints — on the router when
///      `useRouter` is set, otherwise directly on the gateway — forwarding
///      `feeToPay` plus the shared `extraValue` as msg.value.
/// @param methodType 0 = depositERC20(token,amount,gas),
///                   1 = depositERC20(token,to,amount,gas),
///                   2 = depositERC20AndCall.
function _invokeDepositERC20Call(
    bool useRouter,
    uint256 methodType,
    address token,
    uint256 amount,
    address recipient,
    bytes memory dataToCall,
    uint256 gasLimit,
    uint256 feeToPay
) private {
    uint256 msgValue = feeToPay + extraValue;
    if (methodType == 0) {
        // depositERC20(token, amount, gasLimit)
        if (useRouter) {
            router.depositERC20{value: msgValue}(token, amount, gasLimit);
        } else {
            gateway.depositERC20{value: msgValue}(token, amount, gasLimit);
        }
    } else if (methodType == 1) {
        // depositERC20(token, recipient, amount, gasLimit)
        if (useRouter) {
            router.depositERC20{value: msgValue}(token, recipient, amount, gasLimit);
        } else {
            gateway.depositERC20{value: msgValue}(token, recipient, amount, gasLimit);
        }
    } else if (methodType == 2) {
        // depositERC20AndCall(token, recipient, amount, data, gasLimit)
        if (useRouter) {
            router.depositERC20AndCall{value: msgValue}(token, recipient, amount, dataToCall, gasLimit);
        } else {
            gateway.depositERC20AndCall{value: msgValue}(token, recipient, amount, dataToCall, gasLimit);
        }
    }
}
|
||||
|
||||
/// @dev Deploys a fresh MockL1LidoGateway behind a transparent proxy: the
///      proxy is created with a zero implementation first, then upgraded to a
///      new implementation wired to the given `messenger` (the two-step dance
///      lets the implementation's immutables reference already-known
///      addresses).
function _deployGateway(address messenger) internal returns (MockL1LidoGateway _gateway) {
    _gateway = MockL1LidoGateway(_deployProxy(address(0)));

    admin.upgrade(
        ITransparentUpgradeableProxy(address(_gateway)),
        address(
            new MockL1LidoGateway(
                address(l1Token),
                address(l2Token),
                address(counterpartGateway),
                address(router),
                address(messenger)
            )
        )
    );
}
|
||||
}
|
||||
565
contracts/src/test/lido/L2LidoGateway.t.sol
Normal file
565
contracts/src/test/lido/L2LidoGateway.t.sol
Normal file
@@ -0,0 +1,565 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
|
||||
|
||||
import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol";
|
||||
import {ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
|
||||
|
||||
import {IL1ERC20Gateway} from "../../L1/gateways/IL1ERC20Gateway.sol";
|
||||
import {IL2ERC20Gateway} from "../../L2/gateways/IL2ERC20Gateway.sol";
|
||||
import {L2GatewayRouter} from "../../L2/gateways/L2GatewayRouter.sol";
|
||||
import {AddressAliasHelper} from "../../libraries/common/AddressAliasHelper.sol";
|
||||
import {ScrollStandardERC20} from "../../libraries/token/ScrollStandardERC20.sol";
|
||||
import {L1LidoGateway} from "../../lido/L1LidoGateway.sol";
|
||||
|
||||
import {L2GatewayTestBase} from "../L2GatewayTestBase.t.sol";
|
||||
import {MockGatewayRecipient} from "../mocks/MockGatewayRecipient.sol";
|
||||
import {MockL2LidoGateway} from "../mocks/MockL2LidoGateway.sol";
|
||||
import {MockScrollMessenger} from "../mocks/MockScrollMessenger.sol";
|
||||
|
||||
// Unit tests for L2LidoGateway: initialization, deposit/withdrawal enable/disable
// role management, withdrawals to L1, and finalizing deposits from L1.
contract L2LidoGatewayTest is L2GatewayTestBase {
    // events from L2LidoGateway
    event WithdrawERC20(
        address indexed _l1Token,
        address indexed _l2Token,
        address indexed _from,
        address _to,
        uint256 _amount,
        bytes _data
    );
    event FinalizeDepositERC20(
        address indexed _l1Token,
        address indexed _l2Token,
        address indexed _from,
        address _to,
        uint256 _amount,
        bytes _data
    );
    event DepositsEnabled(address indexed enabler);
    event DepositsDisabled(address indexed disabler);
    event WithdrawalsEnabled(address indexed enabler);
    event WithdrawalsDisabled(address indexed disabler);
    event RoleGranted(bytes32 indexed role, address indexed account, address indexed sender);
    event RoleRevoked(bytes32 indexed role, address indexed account, address indexed sender);

    // errors from L2LidoGateway
    error ErrorDepositsEnabled();
    error ErrorDepositsDisabled();
    error ErrorWithdrawalsEnabled();
    error ErrorWithdrawalsDisabled();
    error ErrorCallerIsNotDepositsEnabler();
    error ErrorCallerIsNotDepositsDisabler();
    error ErrorCallerIsNotWithdrawalsEnabler();
    error ErrorCallerIsNotWithdrawalsDisabler();
    error ErrorUnsupportedL1Token();
    error ErrorUnsupportedL2Token();
    error ErrorAccountIsZeroAddress();
    error ErrorNonZeroMsgValue();
    error ErrorWithdrawZeroAmount();
    error WithdrawAndCallIsNotAllowed();

    // gateway under test (mock adds a reentrantCall helper) and its router
    MockL2LidoGateway private gateway;
    L2GatewayRouter private router;

    // L1-side counterpart gateway (deployed locally, only used as an address/identity)
    L1LidoGateway private counterpartGateway;

    // token pair bridged by the gateway
    MockERC20 private l1Token;
    ScrollStandardERC20 private l2Token;

    // Deploys the token pair, counterpart gateway, router and gateway proxy,
    // initializes them, and funds this test contract with L2 tokens.
    function setUp() public {
        setUpBase();
        // Deploy tokens
        l1Token = new MockERC20("Mock L1", "ML1", 18);
        l2Token = ScrollStandardERC20(address(new ERC1967Proxy(address(new ScrollStandardERC20()), new bytes(0))));

        // Deploy L1 contracts
        counterpartGateway = new L1LidoGateway(address(l1Token), address(l2Token), address(1), address(1), address(1));

        // Deploy L2 contracts
        router = L2GatewayRouter(_deployProxy(address(new L2GatewayRouter(address(l2Messenger)))));
        gateway = _deployGateway(address(l2Messenger));

        // Initialize L2 contracts
        gateway.initialize(address(counterpartGateway), address(router), address(l2Messenger));
        gateway.initializeV2(address(0), address(0), address(0), address(0));
        router.initialize(address(0), address(gateway));
        l2Token.initialize("Mock L2", "ML2", 18, address(gateway), address(l1Token));

        // Prepare token balances; only the gateway may mint, hence the prank.
        hevm.startPrank(address(gateway));
        l2Token.mint(address(this), type(uint128).max);
        hevm.stopPrank();

        // Revoke the roles held by address(0) (presumably granted by
        // initializeV2 above — confirm against L2LidoGateway) so every test
        // starts with empty role membership.
        gateway.revokeRole(gateway.DEPOSITS_ENABLER_ROLE(), address(0));
        gateway.revokeRole(gateway.DEPOSITS_DISABLER_ROLE(), address(0));
        gateway.revokeRole(gateway.WITHDRAWALS_ENABLER_ROLE(), address(0));
        gateway.revokeRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(0));
    }

    // Checks all post-initialization state and that re-initialization reverts.
    function testInitialized() external {
        // state in ScrollGatewayBase
        assertEq(address(this), gateway.owner());
        assertEq(address(counterpartGateway), gateway.counterpart());
        assertEq(address(router), gateway.router());
        assertEq(address(l2Messenger), gateway.messenger());

        // state in LidoBridgeableTokens
        assertEq(address(l1Token), gateway.l1Token());
        assertEq(address(l2Token), gateway.l2Token());

        // state in LidoGatewayManager
        assertBoolEq(true, gateway.isDepositsEnabled());
        assertBoolEq(true, gateway.isWithdrawalsEnabled());

        // state in L2LidoGateway
        assertEq(address(l1Token), gateway.getL1ERC20Address(address(l2Token)));
        assertEq(address(l2Token), gateway.getL2ERC20Address(address(l1Token)));

        hevm.expectRevert("Initializable: contract is already initialized");
        gateway.initialize(address(counterpartGateway), address(router), address(l2Messenger));

        hevm.expectRevert("Initializable: contract is already initialized");
        gateway.initializeV2(address(0), address(0), address(0), address(0));
    }

    /*************************************
     * Functions from LidoGatewayManager *
     *************************************/

    function testEnableDeposits() external {
        // revert when already enabled
        hevm.expectRevert(ErrorDepositsEnabled.selector);
        gateway.enableDeposits();

        // revert when caller is not deposits enabler
        gateway.grantRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
        gateway.disableDeposits();
        hevm.expectRevert(ErrorCallerIsNotDepositsEnabler.selector);
        gateway.enableDeposits();

        // succeed
        gateway.grantRole(gateway.DEPOSITS_ENABLER_ROLE(), address(this));
        assertBoolEq(false, gateway.isDepositsEnabled());
        hevm.expectEmit(true, false, false, true);
        emit DepositsEnabled(address(this));
        gateway.enableDeposits();
        assertBoolEq(true, gateway.isDepositsEnabled());
    }

    function testDisableDeposits() external {
        // revert when already disabled
        gateway.grantRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
        gateway.disableDeposits();
        assertBoolEq(false, gateway.isDepositsEnabled());
        hevm.expectRevert(ErrorDepositsDisabled.selector);
        gateway.disableDeposits();

        // revert when caller is not deposits disabler
        gateway.grantRole(gateway.DEPOSITS_ENABLER_ROLE(), address(this));
        gateway.enableDeposits();
        assertBoolEq(true, gateway.isDepositsEnabled());
        gateway.revokeRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
        hevm.expectRevert(ErrorCallerIsNotDepositsDisabler.selector);
        gateway.disableDeposits();

        // succeed
        gateway.grantRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
        assertBoolEq(true, gateway.isDepositsEnabled());
        hevm.expectEmit(true, false, false, true);
        emit DepositsDisabled(address(this));
        gateway.disableDeposits();
        assertBoolEq(false, gateway.isDepositsEnabled());
    }

    function testEnableWithdrawals() external {
        // revert when already enabled
        hevm.expectRevert(ErrorWithdrawalsEnabled.selector);
        gateway.enableWithdrawals();

        // revert when caller is not withdrawals enabler
        gateway.grantRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
        gateway.disableWithdrawals();
        hevm.expectRevert(ErrorCallerIsNotWithdrawalsEnabler.selector);
        gateway.enableWithdrawals();

        // succeed
        gateway.grantRole(gateway.WITHDRAWALS_ENABLER_ROLE(), address(this));
        assertBoolEq(false, gateway.isWithdrawalsEnabled());
        hevm.expectEmit(true, false, false, true);
        emit WithdrawalsEnabled(address(this));
        gateway.enableWithdrawals();
        assertBoolEq(true, gateway.isWithdrawalsEnabled());
    }

    function testDisableWithdrawals() external {
        // revert when already disabled
        gateway.grantRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
        gateway.disableWithdrawals();
        assertBoolEq(false, gateway.isWithdrawalsEnabled());
        hevm.expectRevert(ErrorWithdrawalsDisabled.selector);
        gateway.disableWithdrawals();

        // revert when caller is not withdrawals disabler
        gateway.grantRole(gateway.WITHDRAWALS_ENABLER_ROLE(), address(this));
        gateway.enableWithdrawals();
        assertBoolEq(true, gateway.isWithdrawalsEnabled());
        gateway.revokeRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
        hevm.expectRevert(ErrorCallerIsNotWithdrawalsDisabler.selector);
        gateway.disableWithdrawals();

        // succeed
        gateway.grantRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
        assertBoolEq(true, gateway.isWithdrawalsEnabled());
        hevm.expectEmit(true, false, false, true);
        emit WithdrawalsDisabled(address(this));
        gateway.disableWithdrawals();
        assertBoolEq(false, gateway.isWithdrawalsEnabled());
    }

    // Fuzz: only the owner may grant; granting twice is a no-op.
    function testGrantRole(bytes32 _role, address _account) external {
        // revert not owner
        hevm.startPrank(address(1));
        hevm.expectRevert("Ownable: caller is not the owner");
        gateway.grantRole(_role, _account);
        hevm.stopPrank();

        // succeed
        assertBoolEq(gateway.hasRole(_role, _account), false);
        hevm.expectEmit(true, true, true, true);
        emit RoleGranted(_role, _account, address(this));
        gateway.grantRole(_role, _account);
        assertBoolEq(gateway.hasRole(_role, _account), true);
        assertEq(gateway.getRoleMemberCount(_role), 1);
        assertEq(gateway.getRoleMember(_role, 0), _account);

        // do nothing regrant
        gateway.grantRole(_role, _account);
        assertBoolEq(gateway.hasRole(_role, _account), true);
        assertEq(gateway.getRoleMemberCount(_role), 1);
        assertEq(gateway.getRoleMember(_role, 0), _account);
    }

    // Fuzz: only the owner may revoke; revoking twice is a no-op.
    function testRevokeRole(bytes32 _role, address _account) external {
        // revert not owner
        hevm.startPrank(address(1));
        hevm.expectRevert("Ownable: caller is not the owner");
        gateway.revokeRole(_role, _account);
        hevm.stopPrank();

        // grant first
        gateway.grantRole(_role, _account);
        assertBoolEq(gateway.hasRole(_role, _account), true);
        assertEq(gateway.getRoleMemberCount(_role), 1);
        assertEq(gateway.getRoleMember(_role, 0), _account);

        // revoke
        hevm.expectEmit(true, true, true, true);
        emit RoleRevoked(_role, _account, address(this));
        gateway.revokeRole(_role, _account);
        assertBoolEq(gateway.hasRole(_role, _account), false);
        assertEq(gateway.getRoleMemberCount(_role), 0);

        // revoke again
        gateway.revokeRole(_role, _account);
        assertBoolEq(gateway.hasRole(_role, _account), false);
        assertEq(gateway.getRoleMemberCount(_role), 0);
    }

    /********************************
     * Functions from L2LidoGateway *
     ********************************/

    // Any token other than the configured l2Token is rejected.
    function testGetL1ERC20Address(address token) external {
        hevm.assume(token != address(l2Token));
        hevm.expectRevert(ErrorUnsupportedL2Token.selector);
        gateway.getL1ERC20Address(token);
    }

    // Any token other than the configured l1Token is rejected.
    function testGetL2ERC20Address(address token) external {
        hevm.assume(token != address(l1Token));
        hevm.expectRevert(ErrorUnsupportedL1Token.selector);
        gateway.getL2ERC20Address(token);
    }

    // The six testWithdrawERC20* variants below exercise every overload
    // (methodType 0/1/2) both directly on the gateway and via the router;
    // all shared logic lives in _withdrawERC20.
    function testWithdrawERC20(uint256 amount, uint256 gasLimit) external {
        _withdrawERC20(false, 0, amount, address(this), new bytes(0), gasLimit);
    }

    function testWithdrawERC20WithRecipient(
        uint256 amount,
        address recipient,
        uint256 gasLimit
    ) external {
        _withdrawERC20(false, 1, amount, recipient, new bytes(0), gasLimit);
    }

    function testWithdrawERC20WithRecipientAndCalldata(
        uint256 amount,
        address recipient,
        bytes memory dataToCall,
        uint256 gasLimit
    ) external {
        _withdrawERC20(false, 2, amount, recipient, dataToCall, gasLimit);
    }

    function testWithdrawERC20ByRouter(uint256 amount, uint256 gasLimit) external {
        _withdrawERC20(true, 0, amount, address(this), new bytes(0), gasLimit);
    }

    function testWithdrawERC20WithRecipientByRouter(
        uint256 amount,
        address recipient,
        uint256 gasLimit
    ) external {
        _withdrawERC20(true, 1, amount, recipient, new bytes(0), gasLimit);
    }

    function testWithdrawERC20WithRecipientAndCalldataByRouter(
        uint256 amount,
        address recipient,
        bytes memory dataToCall,
        uint256 gasLimit
    ) external {
        _withdrawERC20(true, 2, amount, recipient, dataToCall, gasLimit);
    }

    // Covers every revert path of finalizeDepositERC20 on a throwaway gateway
    // wired to a mock messenger, then the success path through the real
    // l2Messenger (which mints l2Token to the recipient).
    function testFinalizeDepositERC20(
        address sender,
        uint256 amount,
        bytes memory dataToCall
    ) external {
        amount = bound(amount, 1, l2Token.balanceOf(address(this)));
        MockGatewayRecipient recipient = new MockGatewayRecipient();
        bytes memory message = abi.encodeCall(
            IL2ERC20Gateway.finalizeDepositERC20,
            (address(l1Token), address(l2Token), sender, address(recipient), amount, dataToCall)
        );

        MockScrollMessenger mockMessenger = new MockScrollMessenger();
        MockL2LidoGateway mockGateway = _deployGateway(address(mockMessenger));
        mockGateway.initialize(address(counterpartGateway), address(router), address(mockMessenger));
        mockGateway.initializeV2(address(0), address(0), address(0), address(0));

        // revert caller is not messenger
        hevm.expectRevert(ErrorCallerIsNotMessenger.selector);
        mockGateway.finalizeDepositERC20(
            address(l1Token),
            address(l2Token),
            sender,
            address(recipient),
            amount,
            dataToCall
        );

        // revert not called by counterpart
        hevm.expectRevert(ErrorCallerIsNotCounterpartGateway.selector);
        mockMessenger.callTarget(address(mockGateway), message);

        // revert when reentrant
        mockMessenger.setXDomainMessageSender(address(counterpartGateway));
        hevm.expectRevert("ReentrancyGuard: reentrant call");
        mockGateway.reentrantCall(
            address(mockMessenger),
            abi.encodeCall(mockMessenger.callTarget, (address(mockGateway), message))
        );

        // revert when l1 token not supported
        hevm.expectRevert(ErrorUnsupportedL1Token.selector);
        mockMessenger.callTarget(
            address(mockGateway),
            abi.encodeCall(
                IL2ERC20Gateway.finalizeDepositERC20,
                (address(l2Token), address(l2Token), sender, address(recipient), amount, dataToCall)
            )
        );

        // revert when l2 token not supported
        hevm.expectRevert(ErrorUnsupportedL2Token.selector);
        mockMessenger.callTarget(
            address(mockGateway),
            abi.encodeCall(
                IL2ERC20Gateway.finalizeDepositERC20,
                (address(l1Token), address(l1Token), sender, address(recipient), amount, dataToCall)
            )
        );

        // revert when deposits disabled
        // (role ids are read from `gateway` but apply to `mockGateway`: both
        // instances share the same contract code, so the constants match)
        mockGateway.grantRole(gateway.DEPOSITS_DISABLER_ROLE(), address(this));
        mockGateway.disableDeposits();
        hevm.expectRevert(ErrorDepositsDisabled.selector);
        mockMessenger.callTarget(address(mockGateway), message);

        // revert when nonzero msg.value
        mockGateway.grantRole(gateway.DEPOSITS_ENABLER_ROLE(), address(this));
        mockGateway.enableDeposits();
        hevm.expectRevert(ErrorNonZeroMsgValue.selector);
        mockMessenger.callTarget{value: 1}(address(mockGateway), message);

        // succeed when finalize
        bytes memory xDomainCalldata = abi.encodeCall(
            l2Messenger.relayMessage,
            (address(counterpartGateway), address(gateway), 0, 0, message)
        );

        // should emit FinalizeDepositERC20 from L2LidoGateway
        hevm.expectEmit(true, true, true, true);
        emit FinalizeDepositERC20(address(l1Token), address(l2Token), sender, address(recipient), amount, dataToCall);

        // should emit RelayedMessage from L2ScrollMessenger
        hevm.expectEmit(true, false, false, true);
        emit RelayedMessage(keccak256(xDomainCalldata));

        uint256 gatewayBalance = l2Token.balanceOf(address(gateway));
        uint256 recipientBalance = l2Token.balanceOf(address(recipient));
        assertBoolEq(false, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata)));
        // relayMessage must come from the aliased L1 messenger address
        hevm.startPrank(AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger)));
        l2Messenger.relayMessage(address(counterpartGateway), address(gateway), 0, 0, message);
        hevm.stopPrank();
        assertBoolEq(true, l2Messenger.isL1MessageExecuted(keccak256(xDomainCalldata))); // executed
        assertEq(recipientBalance + amount, l2Token.balanceOf(address(recipient))); // mint token
        assertEq(gatewayBalance, l2Token.balanceOf(address(gateway))); // gateway balance unchanged
    }

    // Shared driver for every withdraw test: walks through all revert paths
    // (reentrancy, unsupported token, zero recipient, withdrawals disabled,
    // zero amount, non-empty calldata) and finally the success path,
    // asserting the emitted events and resulting balances.
    function _withdrawERC20(
        bool useRouter,
        uint256 methodType,
        uint256 amount,
        address recipient,
        bytes memory dataToCall,
        uint256 gasLimit
    ) private {
        hevm.assume(recipient != address(0));
        amount = bound(amount, 1, l2Token.balanceOf(address(this)));

        // revert when reentrant
        hevm.expectRevert("ReentrancyGuard: reentrant call");
        bytes memory reentrantData;
        if (methodType == 0) {
            // overloaded withdrawERC20 — must encode by signature, not selector
            reentrantData = abi.encodeWithSignature(
                "withdrawERC20(address,uint256,uint256)",
                address(l2Token),
                amount,
                gasLimit
            );
        } else if (methodType == 1) {
            reentrantData = abi.encodeWithSignature(
                "withdrawERC20(address,address,uint256,uint256)",
                address(l2Token),
                recipient,
                amount,
                gasLimit
            );
        } else if (methodType == 2) {
            reentrantData = abi.encodeCall(
                IL2ERC20Gateway.withdrawERC20AndCall,
                (address(l2Token), recipient, amount, dataToCall, gasLimit)
            );
        }
        gateway.reentrantCall(useRouter ? address(router) : address(gateway), reentrantData);

        // revert when l2 token not supported
        hevm.expectRevert(ErrorUnsupportedL2Token.selector);
        _invokeWithdrawERC20Call(useRouter, methodType, address(l1Token), amount, recipient, dataToCall, gasLimit);

        // revert when to is zero address (only overloads that take a recipient)
        if (methodType != 0) {
            hevm.expectRevert(ErrorAccountIsZeroAddress.selector);
            _invokeWithdrawERC20Call(useRouter, methodType, address(l2Token), amount, address(0), dataToCall, gasLimit);
        }

        // revert when withdrawals disabled
        gateway.grantRole(gateway.WITHDRAWALS_DISABLER_ROLE(), address(this));
        gateway.disableWithdrawals();
        hevm.expectRevert(ErrorWithdrawalsDisabled.selector);
        _invokeWithdrawERC20Call(useRouter, methodType, address(l2Token), amount, recipient, dataToCall, gasLimit);

        // revert when withdraw zero amount
        gateway.grantRole(gateway.WITHDRAWALS_ENABLER_ROLE(), address(this));
        gateway.enableWithdrawals();
        hevm.expectRevert(ErrorWithdrawZeroAmount.selector);
        _invokeWithdrawERC20Call(useRouter, methodType, address(l2Token), 0, recipient, dataToCall, gasLimit);

        // revert when data is not empty (Lido gateway forbids withdraw-and-call)
        if (dataToCall.length != 0) {
            hevm.expectRevert(WithdrawAndCallIsNotAllowed.selector);
            _invokeWithdrawERC20Call(useRouter, methodType, address(l2Token), amount, recipient, dataToCall, gasLimit);
            return;
        }

        // succeed to withdraw
        bytes memory message = abi.encodeCall(
            IL1ERC20Gateway.finalizeWithdrawERC20,
            (address(l1Token), address(l2Token), address(this), recipient, amount, dataToCall)
        );
        bytes memory xDomainCalldata = abi.encodeCall(
            l2Messenger.relayMessage,
            (address(gateway), address(counterpartGateway), 0, 0, message)
        );
        // should emit AppendMessage from L2MessageQueue
        hevm.expectEmit(false, false, false, true);
        emit AppendMessage(0, keccak256(xDomainCalldata));

        // should emit SentMessage from L2ScrollMessenger
        hevm.expectEmit(true, true, false, true);
        emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, gasLimit, message);

        // should emit WithdrawERC20 from L2LidoGateway
        hevm.expectEmit(true, true, true, true);
        emit WithdrawERC20(address(l1Token), address(l2Token), address(this), recipient, amount, dataToCall);

        uint256 gatewayBalance = l2Token.balanceOf(address(gateway));
        uint256 thisBalance = l2Token.balanceOf(address(this));
        assertEq(l2Messenger.messageSendTimestamp(keccak256(xDomainCalldata)), 0);
        _invokeWithdrawERC20Call(useRouter, methodType, address(l2Token), amount, recipient, dataToCall, gasLimit);
        assertGt(l2Messenger.messageSendTimestamp(keccak256(xDomainCalldata)), 0);
        assertEq(thisBalance - amount, l2Token.balanceOf(address(this))); // tokens burned from sender
        assertEq(gatewayBalance, l2Token.balanceOf(address(gateway))); // gateway holds nothing
    }

    // Dispatches to the requested withdraw overload on either the router or
    // the gateway. methodType: 0 = (token, amount), 1 = + recipient,
    // 2 = withdrawERC20AndCall with calldata.
    function _invokeWithdrawERC20Call(
        bool useRouter,
        uint256 methodType,
        address token,
        uint256 amount,
        address recipient,
        bytes memory dataToCall,
        uint256 gasLimit
    ) private {
        if (useRouter) {
            if (methodType == 0) {
                router.withdrawERC20(token, amount, gasLimit);
            } else if (methodType == 1) {
                router.withdrawERC20(token, recipient, amount, gasLimit);
            } else if (methodType == 2) {
                router.withdrawERC20AndCall(token, recipient, amount, dataToCall, gasLimit);
            }
        } else {
            if (methodType == 0) {
                gateway.withdrawERC20(token, amount, gasLimit);
            } else if (methodType == 1) {
                gateway.withdrawERC20(token, recipient, amount, gasLimit);
            } else if (methodType == 2) {
                gateway.withdrawERC20AndCall(token, recipient, amount, dataToCall, gasLimit);
            }
        }
    }

    // Deploys an empty proxy, then upgrades it to a fresh MockL2LidoGateway
    // implementation bound to the given messenger.
    function _deployGateway(address messenger) internal returns (MockL2LidoGateway _gateway) {
        _gateway = MockL2LidoGateway(_deployProxy(address(0)));

        admin.upgrade(
            ITransparentUpgradeableProxy(address(_gateway)),
            address(
                new MockL2LidoGateway(
                    address(l1Token),
                    address(l2Token),
                    address(counterpartGateway),
                    address(router),
                    address(messenger)
                )
            )
        );
    }
}
|
||||
69
contracts/src/test/lido/L2WstETHToken.t.sol
Normal file
69
contracts/src/test/lido/L2WstETHToken.t.sol
Normal file
@@ -0,0 +1,69 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
|
||||
|
||||
import {IERC1271Upgradeable} from "@openzeppelin/contracts-upgradeable/interfaces/IERC1271Upgradeable.sol";
|
||||
import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol";
|
||||
|
||||
import {L2WstETHToken} from "../../lido/L2WstETHToken.sol";
|
||||
|
||||
// Unit tests for L2WstETHToken: initialization and ERC-2612 permit with an
// ERC-1271 contract signer (this test contract acts as the signer).
contract L2WstETHTokenTest is DSTestPlus {
    L2WstETHToken private counterpart;
    L2WstETHToken private token;

    // value returned by isValidSignature below, set per test case
    bytes4 private _magicValue;
    // when true, isValidSignature reverts instead of returning
    bool private revertOnSignature;

    function setUp() public {
        hevm.warp(1000); // make block timestamp nonzero

        counterpart = new L2WstETHToken();
        token = L2WstETHToken(address(new ERC1967Proxy(address(new L2WstETHToken()), new bytes(0))));

        // this test contract is the gateway; `counterpart` is the L1 token
        token.initialize("Wrapped liquid staked Ether 2.0", "wstETH", 18, address(this), address(counterpart));
    }

    function testInitialize() external {
        assertEq(token.name(), "Wrapped liquid staked Ether 2.0");
        assertEq(token.symbol(), "wstETH");
        assertEq(token.decimals(), 18);
        assertEq(token.gateway(), address(this));
        assertEq(token.counterpart(), address(counterpart));
    }

    // Exercises permit() with an ERC-1271 contract owner (address(this)):
    // expired deadline, wrong magic value, reverting signer, then success.
    function testPermit(uint256 amount) external {
        uint256 timestamp = block.timestamp;
        // revert when expired
        hevm.expectRevert("ERC20Permit: expired deadline");
        token.permit(address(this), address(counterpart), 1, timestamp - 1, 0, 0, 0);

        // revert when contract signer returns the wrong magic value
        hevm.expectRevert("ERC20Permit: invalid signature");
        _magicValue = bytes4(0);
        revertOnSignature = false;
        token.permit(address(this), address(counterpart), 1, timestamp, 0, 0, 0);

        // revert when contract signer itself reverts
        hevm.expectRevert("ERC20Permit: invalid signature");
        _magicValue = IERC1271Upgradeable.isValidSignature.selector;
        revertOnSignature = true;
        token.permit(address(this), address(counterpart), 1, timestamp, 0, 0, 0);

        // succeed on contract signer
        _magicValue = IERC1271Upgradeable.isValidSignature.selector;
        revertOnSignature = false;
        assertEq(token.allowance(address(this), address(counterpart)), 0);
        token.permit(address(this), address(counterpart), amount, timestamp, 0, 0, 0);
        assertEq(token.allowance(address(this), address(counterpart)), amount);
    }

    // ERC-1271 hook: behavior is driven by the two state flags above.
    function isValidSignature(bytes32 hash, bytes memory signature) external view returns (bytes4 magicValue) {
        if (revertOnSignature) {
            revert("revert");
        }

        magicValue = _magicValue;
    }
}
|
||||
28
contracts/src/test/mocks/MockL1LidoGateway.sol
Normal file
28
contracts/src/test/mocks/MockL1LidoGateway.sol
Normal file
@@ -0,0 +1,28 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {L1LidoGateway} from "../../lido/L1LidoGateway.sol";
|
||||
|
||||
// Test double for L1LidoGateway that exposes a way to call back into the
// gateway while its reentrancy lock is held.
contract MockL1LidoGateway is L1LidoGateway {
    // Pass-through constructor: all wiring goes straight to L1LidoGateway.
    constructor(
        address _l1Token,
        address _l2Token,
        address _counterpart,
        address _router,
        address _messenger
    ) L1LidoGateway(_l1Token, _l2Token, _counterpart, _router, _messenger) {}

    // Invokes `target` with `data` while holding the nonReentrant lock,
    // forwarding msg.value and bubbling up the callee's revert data verbatim.
    function reentrantCall(address target, bytes calldata data) external payable nonReentrant {
        (bool ok, ) = target.call{value: msg.value}(data);
        if (ok) {
            return;
        }
        // re-raise the exact revert payload of the failed inner call
        // solhint-disable-next-line no-inline-assembly
        assembly {
            let free := mload(0x40)
            returndatacopy(free, 0, returndatasize())
            revert(free, returndatasize())
        }
    }
}
|
||||
28
contracts/src/test/mocks/MockL2LidoGateway.sol
Normal file
28
contracts/src/test/mocks/MockL2LidoGateway.sol
Normal file
@@ -0,0 +1,28 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {L2LidoGateway} from "../../lido/L2LidoGateway.sol";
|
||||
|
||||
// Test double for L2LidoGateway that exposes a way to call back into the
// gateway while its reentrancy lock is held.
contract MockL2LidoGateway is L2LidoGateway {
    // Pass-through constructor: all wiring goes straight to L2LidoGateway.
    constructor(
        address _l1Token,
        address _l2Token,
        address _counterpart,
        address _router,
        address _messenger
    ) L2LidoGateway(_l1Token, _l2Token, _counterpart, _router, _messenger) {}

    // Invokes `target` with `data` while holding the nonReentrant lock,
    // forwarding msg.value and bubbling up the callee's revert data verbatim.
    function reentrantCall(address target, bytes calldata data) external payable nonReentrant {
        (bool ok, ) = target.call{value: msg.value}(data);
        if (ok) {
            return;
        }
        // re-raise the exact revert payload of the failed inner call
        // solhint-disable-next-line no-inline-assembly
        assembly {
            let free := mload(0x40)
            returndatacopy(free, 0, returndatasize())
            revert(free, returndatasize())
        }
    }
}
|
||||
@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
|
||||
cur, err := Current(pgDB.DB)
|
||||
assert.NoError(t, err)
|
||||
// total number of tables.
|
||||
assert.Equal(t, 14, int(cur))
|
||||
assert.Equal(t, 15, int(cur))
|
||||
}
|
||||
|
||||
func testMigrate(t *testing.T) {
|
||||
|
||||
46
database/migrate/migrations/00015_pending_transaction.sql
Normal file
46
database/migrate/migrations/00015_pending_transaction.sql
Normal file
@@ -0,0 +1,46 @@
|
||||
-- +goose Up
-- +goose StatementBegin

-- Tracks transactions submitted by the rollup senders until they are
-- confirmed or replaced on L1/L2.
CREATE TABLE pending_transaction
(
    id SERIAL PRIMARY KEY,

    -- context info
    context_id VARCHAR NOT NULL, -- batch hash in commit/finalize tx, block hash in update gas oracle tx
    hash VARCHAR NOT NULL,
    status SMALLINT NOT NULL,
    rlp_encoding BYTEA NOT NULL,

    -- debug info
    chain_id BIGINT NOT NULL,
    type SMALLINT NOT NULL,
    gas_tip_cap BIGINT NOT NULL,
    gas_fee_cap BIGINT NOT NULL, -- based on geth's implementation, it's gas price in legacy tx.
    gas_limit BIGINT NOT NULL,
    nonce BIGINT NOT NULL,
    submit_block_number BIGINT NOT NULL,

    -- sender info
    sender_name VARCHAR NOT NULL,
    sender_service VARCHAR NOT NULL,
    sender_address VARCHAR NOT NULL,
    sender_type SMALLINT NOT NULL,

    -- bookkeeping timestamps; deleted_at is used for soft deletion
    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP(0) DEFAULT NULL
);

-- one row per transaction hash
CREATE UNIQUE INDEX unique_idx_pending_transaction_on_hash ON pending_transaction(hash);
CREATE INDEX idx_pending_transaction_on_sender_type_status_nonce_gas_fee_cap ON pending_transaction (sender_type, status, nonce, gas_fee_cap);
CREATE INDEX idx_pending_transaction_on_sender_address_nonce ON pending_transaction(sender_address, nonce);

COMMENT ON COLUMN pending_transaction.sender_type IS 'unknown, commit batch, finalize batch, L1 gas oracle, L2 gas oracle';
COMMENT ON COLUMN pending_transaction.status IS 'unknown, pending, replaced, confirmed, confirmed failed';

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS pending_transaction;
-- +goose StatementEnd
|
||||
23
go.work.sum
23
go.work.sum
@@ -488,13 +488,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
@@ -564,7 +558,6 @@ github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZn
|
||||
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
|
||||
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU=
|
||||
@@ -857,6 +850,7 @@ github.com/onsi/ginkgo/v2 v2.8.1 h1:xFTEVwOFa1D/Ty24Ws1npBWkDYEV9BqZrsDxVrVkrrU=
|
||||
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
|
||||
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
|
||||
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
|
||||
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
|
||||
@@ -980,6 +974,12 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
|
||||
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
|
||||
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
@@ -1141,6 +1141,7 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -1154,9 +1155,11 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
@@ -1195,9 +1198,12 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1211,10 +1217,12 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1273,6 +1281,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
|
||||
@@ -56,7 +56,7 @@ func action(ctx *cli.Context) error {
|
||||
defer func() {
|
||||
cancel()
|
||||
if err = database.CloseDB(db); err != nil {
|
||||
log.Error("can not close ormFactory", "error", err)
|
||||
log.Crit("failed to close db connection", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -64,14 +64,12 @@ func action(ctx *cli.Context) error {
|
||||
observability.Server(ctx, db)
|
||||
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
|
||||
if err != nil {
|
||||
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
if err != nil {
|
||||
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
|
||||
|
||||
@@ -59,7 +59,7 @@ func action(ctx *cli.Context) error {
|
||||
defer func() {
|
||||
cancel()
|
||||
if err = database.CloseDB(db); err != nil {
|
||||
log.Error("can not close ormFactory", "error", err)
|
||||
log.Crit("failed to close db connection", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -68,28 +68,24 @@ func action(ctx *cli.Context) error {
|
||||
|
||||
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
|
||||
if err != nil {
|
||||
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
// Init l2geth connection
|
||||
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
if err != nil {
|
||||
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
|
||||
|
||||
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
|
||||
if err != nil {
|
||||
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
|
||||
if err != nil {
|
||||
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
// Start l1 watcher process
|
||||
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
|
||||
@@ -103,6 +99,7 @@ func action(ctx *cli.Context) error {
|
||||
|
||||
if loopErr = l1watcher.FetchBlockHeader(number - 1); loopErr != nil {
|
||||
log.Error("Failed to fetch L1 block header", "lastest", number-1, "err", loopErr)
|
||||
return
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@@ -88,9 +88,9 @@ func (b *MockApp) MockConfig(store bool) error {
|
||||
}
|
||||
|
||||
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
|
||||
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.DBConfig.DSN = base.DBImg.Endpoint()
|
||||
b.Config = cfg
|
||||
|
||||
|
||||
@@ -59,7 +59,7 @@ func action(ctx *cli.Context) error {
|
||||
defer func() {
|
||||
cancel()
|
||||
if err = database.CloseDB(db); err != nil {
|
||||
log.Error("can not close ormFactory", "error", err)
|
||||
log.Crit("failed to close db connection", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -69,27 +69,23 @@ func action(ctx *cli.Context) error {
|
||||
// Init l2geth connection
|
||||
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
if err != nil {
|
||||
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
|
||||
if err != nil {
|
||||
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
|
||||
if err != nil {
|
||||
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to create chunkProposer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
|
||||
if err != nil {
|
||||
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
log.Crit("failed to create batchProposer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
|
||||
|
||||
@@ -1,24 +1,21 @@
|
||||
{
|
||||
"l1_config": {
|
||||
"confirmations": "0x6",
|
||||
"endpoint": "DUMMY_ENDPOINT",
|
||||
"endpoint": "https://rpc.ankr.com/eth",
|
||||
"l1_message_queue_address": "0x0000000000000000000000000000000000000000",
|
||||
"scroll_chain_address": "0x0000000000000000000000000000000000000000",
|
||||
"start_height": 0,
|
||||
"relayer_config": {
|
||||
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
|
||||
"sender_config": {
|
||||
"endpoint": "https://sepolia-rpc.scroll.io",
|
||||
"check_pending_time": 2,
|
||||
"check_balance_time": 100,
|
||||
"endpoint": "https://rpc.scroll.io",
|
||||
"escalate_blocks": 100,
|
||||
"confirmations": "0x1",
|
||||
"escalate_multiple_num": 11,
|
||||
"escalate_multiple_den": 10,
|
||||
"max_gas_price": 10000000000,
|
||||
"tx_type": "LegacyTx",
|
||||
"min_balance": 100000000000000000000,
|
||||
"pending_limit": 10
|
||||
"check_pending_time": 3
|
||||
},
|
||||
"gas_oracle_config": {
|
||||
"min_gas_price": 0,
|
||||
@@ -29,24 +26,21 @@
|
||||
},
|
||||
"l2_config": {
|
||||
"confirmations": "0x1",
|
||||
"endpoint": "https://sepolia-rpc.scroll.io",
|
||||
"endpoint": "https://rpc.scroll.io",
|
||||
"l2_messenger_address": "0x0000000000000000000000000000000000000000",
|
||||
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
|
||||
"relayer_config": {
|
||||
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
|
||||
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
|
||||
"sender_config": {
|
||||
"endpoint": "https://sepolia-rpc.scroll.io",
|
||||
"check_pending_time": 10,
|
||||
"check_balance_time": 100,
|
||||
"endpoint": "https://rpc.ankr.com/eth",
|
||||
"escalate_blocks": 100,
|
||||
"confirmations": "0x6",
|
||||
"escalate_multiple_num": 11,
|
||||
"escalate_multiple_den": 10,
|
||||
"max_gas_price": 10000000000,
|
||||
"tx_type": "LegacyTx",
|
||||
"min_balance": 100000000000000000000,
|
||||
"pending_limit": 10
|
||||
"tx_type": "DynamicFeeTx",
|
||||
"check_pending_time": 12
|
||||
},
|
||||
"gas_oracle_config": {
|
||||
"min_gas_price": 0,
|
||||
|
||||
@@ -6,7 +6,6 @@ require (
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/go-resty/resty/v2 v2.7.0
|
||||
github.com/orcaman/concurrent-map/v2 v2.0.1
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
|
||||
github.com/smartystreets/goconvey v1.8.0
|
||||
@@ -40,6 +39,7 @@ require (
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/gopherjs/gopherjs v1.17.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
|
||||
@@ -71,6 +71,7 @@ require (
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/smartystreets/assertions v1.13.1 // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
|
||||
@@ -40,6 +40,8 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
@@ -64,12 +66,21 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -83,6 +94,7 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
@@ -132,12 +144,18 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
|
||||
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
@@ -190,6 +208,7 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
@@ -217,6 +236,8 @@ golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
|
||||
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
|
||||
@@ -228,7 +249,13 @@ golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -240,6 +267,8 @@ golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
@@ -248,6 +277,14 @@ golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
@@ -257,9 +294,13 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
@@ -29,12 +28,6 @@ type SenderConfig struct {
|
||||
MaxGasPrice uint64 `json:"max_gas_price"`
|
||||
// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
|
||||
TxType string `json:"tx_type"`
|
||||
// The min balance set for check and set balance for sender's accounts.
|
||||
MinBalance *big.Int `json:"min_balance"`
|
||||
// The interval (in seconds) to check balance and top up sender's accounts
|
||||
CheckBalanceTime uint64 `json:"check_balance_time"`
|
||||
// The sender's pending count limit.
|
||||
PendingLimit int `json:"pending_limit"`
|
||||
}
|
||||
|
||||
// ChainMonitor this config is used to get batch status from chain_monitor API.
|
||||
|
||||
@@ -2,7 +2,6 @@ package relayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
@@ -44,7 +43,7 @@ type Layer1Relayer struct {
|
||||
|
||||
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
|
||||
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", types.SenderTypeL1GasOracle, db, reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
|
||||
@@ -118,9 +117,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
|
||||
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
|
||||
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
|
||||
}
|
||||
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -136,28 +133,38 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) handleConfirmation(cfm *sender.Confirmation) {
|
||||
switch cfm.SenderType {
|
||||
case types.SenderTypeL1GasOracle:
|
||||
var status types.GasOracleStatus
|
||||
if cfm.IsSuccessful {
|
||||
status = types.GasOracleImported
|
||||
r.metrics.rollupL1UpdateGasOracleConfirmedTotal.Inc()
|
||||
log.Info("UpdateGasOracleTxType transaction confirmed in layer2", "confirmation", cfm)
|
||||
} else {
|
||||
status = types.GasOracleImportedFailed
|
||||
r.metrics.rollupL1UpdateGasOracleConfirmedFailedTotal.Inc()
|
||||
log.Warn("UpdateGasOracleTxType transaction confirmed but failed in layer2", "confirmation", cfm)
|
||||
}
|
||||
|
||||
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ContextID, status, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "confirmation", cfm, "err", err)
|
||||
}
|
||||
default:
|
||||
log.Warn("Unknown transaction type", "confirmation", cfm)
|
||||
}
|
||||
|
||||
log.Info("Transaction confirmed in layer2", "confirmation", cfm)
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.metrics.rollupL1GasOraclerConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
|
||||
}
|
||||
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
|
||||
} else {
|
||||
// @todo handle db error
|
||||
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
|
||||
}
|
||||
log.Info("transaction confirmed in layer2", "confirmation", cfm)
|
||||
}
|
||||
r.handleConfirmation(cfm)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,12 +8,10 @@ import (
|
||||
)
|
||||
|
||||
type l1RelayerMetrics struct {
|
||||
rollupL1RelayedMsgsTotal prometheus.Counter
|
||||
rollupL1RelayedMsgsFailureTotal prometheus.Counter
|
||||
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
|
||||
rollupL1RelayerLastGasPrice prometheus.Gauge
|
||||
rollupL1MsgsRelayedConfirmedTotal prometheus.Counter
|
||||
rollupL1GasOraclerConfirmedTotal prometheus.Counter
|
||||
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
|
||||
rollupL1RelayerLastGasPrice prometheus.Gauge
|
||||
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
|
||||
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -24,18 +22,6 @@ var (
|
||||
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
|
||||
initL1RelayerMetricOnce.Do(func() {
|
||||
l1RelayerMetric = &l1RelayerMetrics{
|
||||
rollupL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer1_msg_relayed_total",
|
||||
Help: "The total number of the l1 relayed message.",
|
||||
}),
|
||||
rollupL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer1_msg_relayed_failure_total",
|
||||
Help: "The total number of the l1 relayed failure message.",
|
||||
}),
|
||||
rollupL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer1_relayed_confirmed_total",
|
||||
Help: "The total number of layer1 relayed confirmed",
|
||||
}),
|
||||
rollupL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer1_gas_price_oracler_total",
|
||||
Help: "The total number of layer1 gas price oracler run total",
|
||||
@@ -44,9 +30,13 @@ func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
|
||||
Name: "rollup_layer1_gas_price_latest_gas_price",
|
||||
Help: "The latest gas price of rollup relayer l1",
|
||||
}),
|
||||
rollupL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer1_gas_oracler_confirmed_total",
|
||||
Help: "The total number of layer1 relayed confirmed",
|
||||
rollupL1UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer1_update_gas_oracle_confirmed_total",
|
||||
Help: "The total number of updating layer1 gas oracle confirmed",
|
||||
}),
|
||||
rollupL1UpdateGasOracleConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer1_update_gas_oracle_confirmed_failed_total",
|
||||
Help: "The total number of updating layer1 gas oracle confirmed failed",
|
||||
}),
|
||||
}
|
||||
})
|
||||
|
||||
@@ -61,12 +61,14 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
|
||||
// Simulate message confirmations.
|
||||
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "gas-oracle-1",
|
||||
ContextID: "gas-oracle-1",
|
||||
IsSuccessful: true,
|
||||
SenderType: types.SenderTypeL1GasOracle,
|
||||
})
|
||||
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "gas-oracle-2",
|
||||
ContextID: "gas-oracle-2",
|
||||
IsSuccessful: false,
|
||||
SenderType: types.SenderTypeL1GasOracle,
|
||||
})
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
@@ -74,7 +76,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"})
|
||||
msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"})
|
||||
return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
|
||||
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
|
||||
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleImportedFailed
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
@@ -2,11 +2,9 @@ package relayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-resty/resty/v2"
|
||||
@@ -60,31 +58,23 @@ type Layer2Relayer struct {
|
||||
// Used to get batch status from chain_monitor api.
|
||||
chainMonitorClient *resty.Client
|
||||
|
||||
// A list of processing batches commitment.
|
||||
// key(string): confirmation ID, value(string): batch hash.
|
||||
processingCommitment sync.Map
|
||||
|
||||
// A list of processing batch finalization.
|
||||
// key(string): confirmation ID, value(string): batch hash.
|
||||
processingFinalization sync.Map
|
||||
|
||||
metrics *l2RelayerMetrics
|
||||
}
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
|
||||
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", types.SenderTypeCommitBatch, db, reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", reg)
|
||||
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", types.SenderTypeFinalizeBatch, db, reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", reg)
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", types.SenderTypeL2GasOracle, db, reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
@@ -125,9 +115,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
|
||||
cfg: cfg,
|
||||
processingCommitment: sync.Map{},
|
||||
processingFinalization: sync.Map{},
|
||||
cfg: cfg,
|
||||
}
|
||||
|
||||
// chain_monitor client
|
||||
@@ -253,8 +241,8 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
|
||||
// handle confirmation
|
||||
case confirmation := <-r.commitSender.ConfirmChan():
|
||||
if confirmation.ID != batchHash {
|
||||
return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
|
||||
if confirmation.ContextID != batchHash {
|
||||
return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ContextID)
|
||||
}
|
||||
if !confirmation.IsSuccessful {
|
||||
return fmt.Errorf("import genesis batch tx failed")
|
||||
@@ -293,9 +281,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
|
||||
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
|
||||
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
|
||||
}
|
||||
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -382,14 +368,13 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
}
|
||||
|
||||
// send transaction
|
||||
txID := batch.Hash + "-commit"
|
||||
fallbackGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
|
||||
if types.RollupStatus(batch.RollupStatus) == types.RollupCommitFailed {
|
||||
// use eth_estimateGas if this batch has been committed failed.
|
||||
fallbackGasLimit = 0
|
||||
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", batch.Hash)
|
||||
}
|
||||
txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, fallbackGasLimit)
|
||||
txHash, err := r.commitSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, fallbackGasLimit)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"Failed to send commitBatch tx to layer1",
|
||||
@@ -415,7 +400,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
return
|
||||
}
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
|
||||
r.processingCommitment.Store(txID, batch.Hash)
|
||||
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
|
||||
}
|
||||
}
|
||||
@@ -554,34 +538,27 @@ func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
|
||||
}
|
||||
}
|
||||
|
||||
txID := batch.Hash + "-finalize"
|
||||
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
|
||||
txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), txCalldata, 0)
|
||||
txHash, err := r.finalizeSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, big.NewInt(0), txCalldata, 0)
|
||||
finalizeTxHash := &txHash
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
|
||||
// This can happen normally if we try to finalize 2 or more
|
||||
// batches around the same time. The 2nd tx might fail since
|
||||
// the client does not see the 1st tx's updates at this point.
|
||||
// TODO: add more fine-grained error handling
|
||||
log.Error(
|
||||
"finalizeBatch in layer1 failed",
|
||||
"with proof", withProof,
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
)
|
||||
log.Debug(
|
||||
"finalizeBatch in layer1 failed",
|
||||
"with proof", withProof,
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"calldata", common.Bytes2Hex(txCalldata),
|
||||
"err", err,
|
||||
)
|
||||
}
|
||||
log.Error(
|
||||
"finalizeBatch in layer1 failed",
|
||||
"with proof", withProof,
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
)
|
||||
log.Debug(
|
||||
"finalizeBatch in layer1 failed",
|
||||
"with proof", withProof,
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"calldata", common.Bytes2Hex(txCalldata),
|
||||
"err", err,
|
||||
)
|
||||
return err
|
||||
}
|
||||
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", batch.Index, "batch hash", batch.Hash, "tx hash", batch.Hash)
|
||||
@@ -591,7 +568,6 @@ func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
|
||||
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "err", err)
|
||||
return err
|
||||
}
|
||||
r.processingFinalization.Store(txID, batch.Hash)
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
|
||||
return nil
|
||||
}
|
||||
@@ -642,53 +618,59 @@ func (r *Layer2Relayer) getBatchStatusByIndex(batch *orm.Batch) (bool, error) {
|
||||
return response.Data, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
transactionType := "Unknown"
|
||||
// check whether it is CommitBatches transaction
|
||||
if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok {
|
||||
transactionType = "BatchesCommitment"
|
||||
func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
|
||||
switch cfm.SenderType {
|
||||
case types.SenderTypeCommitBatch:
|
||||
var status types.RollupStatus
|
||||
if confirmation.IsSuccessful {
|
||||
if cfm.IsSuccessful {
|
||||
status = types.RollupCommitted
|
||||
r.metrics.rollupL2BatchesCommittedConfirmedTotal.Inc()
|
||||
} else {
|
||||
status = types.RollupCommitFailed
|
||||
r.metrics.rollupL2BatchesCommittedConfirmedFailedTotal.Inc()
|
||||
log.Warn("commitBatch transaction confirmed but failed in layer1", "confirmation", confirmation)
|
||||
log.Warn("CommitBatchTxType transaction confirmed but failed in layer1", "confirmation", cfm)
|
||||
}
|
||||
// @todo handle db error
|
||||
err := r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
|
||||
if err != nil {
|
||||
log.Warn("UpdateCommitTxHashAndRollupStatus failed",
|
||||
"batch hash", batchHash.(string),
|
||||
"tx hash", confirmation.TxHash.String(), "err", err)
|
||||
}
|
||||
r.metrics.rollupL2BatchesCommittedConfirmedTotal.Inc()
|
||||
r.processingCommitment.Delete(confirmation.ID)
|
||||
}
|
||||
|
||||
// check whether it is proof finalization transaction
|
||||
if batchHash, ok := r.processingFinalization.Load(confirmation.ID); ok {
|
||||
transactionType = "ProofFinalization"
|
||||
err := r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, cfm.ContextID, cfm.TxHash.String(), status)
|
||||
if err != nil {
|
||||
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
|
||||
}
|
||||
case types.SenderTypeFinalizeBatch:
|
||||
var status types.RollupStatus
|
||||
if confirmation.IsSuccessful {
|
||||
if cfm.IsSuccessful {
|
||||
status = types.RollupFinalized
|
||||
r.metrics.rollupL2BatchesFinalizedConfirmedTotal.Inc()
|
||||
} else {
|
||||
status = types.RollupFinalizeFailed
|
||||
r.metrics.rollupL2BatchesFinalizedConfirmedFailedTotal.Inc()
|
||||
log.Warn("finalizeBatchWithProof transaction confirmed but failed in layer1", "confirmation", confirmation)
|
||||
log.Warn("FinalizeBatchTxType transaction confirmed but failed in layer1", "confirmation", cfm)
|
||||
}
|
||||
|
||||
// @todo handle db error
|
||||
err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
|
||||
err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, cfm.ContextID, cfm.TxHash.String(), status)
|
||||
if err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
|
||||
"batch hash", batchHash.(string),
|
||||
"tx hash", confirmation.TxHash.String(), "err", err)
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
|
||||
}
|
||||
r.metrics.rollupL2BatchesFinalizedConfirmedTotal.Inc()
|
||||
r.processingFinalization.Delete(confirmation.ID)
|
||||
case types.SenderTypeL2GasOracle:
|
||||
batchHash := cfm.ContextID
|
||||
var status types.GasOracleStatus
|
||||
if cfm.IsSuccessful {
|
||||
status = types.GasOracleImported
|
||||
r.metrics.rollupL2UpdateGasOracleConfirmedTotal.Inc()
|
||||
} else {
|
||||
status = types.GasOracleImportedFailed
|
||||
r.metrics.rollupL2UpdateGasOracleConfirmedFailedTotal.Inc()
|
||||
log.Warn("UpdateGasOracleTxType transaction confirmed but failed in layer1", "confirmation", cfm)
|
||||
}
|
||||
|
||||
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batchHash, status, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "confirmation", cfm, "err", err)
|
||||
}
|
||||
default:
|
||||
log.Warn("Unknown transaction type", "confirmation", cfm)
|
||||
}
|
||||
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
|
||||
|
||||
log.Info("Transaction confirmed in layer1", "confirmation", cfm)
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
@@ -696,27 +678,12 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case confirmation := <-r.commitSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.finalizeSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case cfm := <-r.commitSender.ConfirmChan():
|
||||
r.handleConfirmation(cfm)
|
||||
case cfm := <-r.finalizeSender.ConfirmChan():
|
||||
r.handleConfirmation(cfm)
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.metrics.rollupL2BatchesGasOraclerConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
|
||||
}
|
||||
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
|
||||
} else {
|
||||
// @todo handle db error
|
||||
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
|
||||
}
|
||||
log.Info("transaction confirmed in layer1", "confirmation", cfm)
|
||||
}
|
||||
r.handleConfirmation(cfm)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,8 @@ type l2RelayerMetrics struct {
|
||||
rollupL2BatchesCommittedConfirmedFailedTotal prometheus.Counter
|
||||
rollupL2BatchesFinalizedConfirmedTotal prometheus.Counter
|
||||
rollupL2BatchesFinalizedConfirmedFailedTotal prometheus.Counter
|
||||
rollupL2BatchesGasOraclerConfirmedTotal prometheus.Counter
|
||||
rollupL2UpdateGasOracleConfirmedTotal prometheus.Counter
|
||||
rollupL2UpdateGasOracleConfirmedFailedTotal prometheus.Counter
|
||||
rollupL2ChainMonitorLatestFailedCall prometheus.Counter
|
||||
rollupL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
|
||||
}
|
||||
@@ -76,9 +77,13 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
|
||||
Name: "rollup_layer2_process_finalized_batches_confirmed_failed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed failed total",
|
||||
}),
|
||||
rollupL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_process_gras_oracler_confirmed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed total",
|
||||
rollupL2UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_total",
|
||||
Help: "The total number of updating layer2 gas oracle confirmed",
|
||||
}),
|
||||
rollupL2UpdateGasOracleConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_failed_total",
|
||||
Help: "The total number of updating layer2 gas oracle confirmed failed",
|
||||
}),
|
||||
rollupL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_chain_monitor_latest_failed_batch_call",
|
||||
|
||||
@@ -164,11 +164,9 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
processingKeys := []string{"committed-1", "committed-2"}
|
||||
isSuccessful := []bool{true, false}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batchHashes := make([]string, len(processingKeys))
|
||||
batchHashes := make([]string, len(isSuccessful))
|
||||
for i := range batchHashes {
|
||||
batchMeta := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
@@ -181,12 +179,12 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
batchHashes[i] = batch.Hash
|
||||
}
|
||||
|
||||
for i, key := range processingKeys {
|
||||
l2Relayer.processingCommitment.Store(key, batchHashes[i])
|
||||
for i, batchHash := range batchHashes {
|
||||
l2Relayer.commitSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: key,
|
||||
ContextID: batchHash,
|
||||
IsSuccessful: isSuccessful[i],
|
||||
TxHash: common.HexToHash("0x123456789abcdef"),
|
||||
SenderType: types.SenderTypeCommitBatch,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -220,11 +218,9 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
processingKeys := []string{"finalized-1", "finalized-2"}
|
||||
isSuccessful := []bool{true, false}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batchHashes := make([]string, len(processingKeys))
|
||||
batchHashes := make([]string, len(isSuccessful))
|
||||
for i := range batchHashes {
|
||||
batchMeta := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
@@ -237,12 +233,12 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
|
||||
batchHashes[i] = batch.Hash
|
||||
}
|
||||
|
||||
for i, key := range processingKeys {
|
||||
l2Relayer.processingFinalization.Store(key, batchHashes[i])
|
||||
for i, batchHash := range batchHashes {
|
||||
l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: key,
|
||||
ContextID: batchHash,
|
||||
IsSuccessful: isSuccessful[i],
|
||||
TxHash: common.HexToHash("0x123456789abcdef"),
|
||||
SenderType: types.SenderTypeFinalizeBatch,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -307,13 +303,14 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
|
||||
|
||||
for _, confirmation := range confirmations {
|
||||
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: confirmation.batchHash,
|
||||
ContextID: confirmation.batchHash,
|
||||
IsSuccessful: confirmation.isSuccessful,
|
||||
SenderType: types.SenderTypeL2GasOracle,
|
||||
})
|
||||
}
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
|
||||
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleImportedFailed}
|
||||
for i, confirmation := range confirmations {
|
||||
gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0)
|
||||
if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
|
||||
@@ -378,13 +375,13 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
|
||||
convey.Convey("Failed to send setL2BaseFee tx to layer2", t, func() {
|
||||
targetErr := errors.New("failed to send setL2BaseFee tx to layer2 error")
|
||||
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
|
||||
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
|
||||
return common.Hash{}, targetErr
|
||||
})
|
||||
relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
|
||||
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
|
||||
return common.HexToHash("0x56789abcdef1234"), nil
|
||||
})
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
@@ -40,6 +41,10 @@ var (
|
||||
)
|
||||
|
||||
func setupEnv(t *testing.T) {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
// Load config.
|
||||
var err error
|
||||
cfg, err = config.NewConfig("../../../conf/config.json")
|
||||
|
||||
@@ -1,29 +1,31 @@
|
||||
package sender
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, fallbackGasLimit uint64) (*FeeData, error) {
|
||||
func (s *Sender) estimateLegacyGas(to *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
|
||||
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
gasLimit, err := s.estimateGasLimit(auth, contract, input, gasPrice, nil, nil, value)
|
||||
gasLimit, _, err := s.estimateGasLimit(to, data, gasPrice, nil, nil, value, false)
|
||||
if err != nil {
|
||||
log.Error("estimateLegacyGas estimateGasLimit failure", "gas price", gasPrice, "from", auth.From.Hex(),
|
||||
"nonce", auth.Nonce.Uint64(), "contract address", contract.Hex(), "fallback gas limit", fallbackGasLimit, "error", err)
|
||||
log.Error("estimateLegacyGas estimateGasLimit failure", "gas price", gasPrice, "from", s.auth.From.String(),
|
||||
"nonce", s.auth.Nonce.Uint64(), "to address", to.String(), "fallback gas limit", fallbackGasLimit, "error", err)
|
||||
if fallbackGasLimit == 0 {
|
||||
return nil, err
|
||||
}
|
||||
gasLimit = fallbackGasLimit
|
||||
} else {
|
||||
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
|
||||
}
|
||||
return &FeeData{
|
||||
gasPrice: gasPrice,
|
||||
@@ -31,55 +33,97 @@ func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Add
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, fallbackGasLimit uint64) (*FeeData, error) {
|
||||
func (s *Sender) estimateDynamicGas(to *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64, baseFee uint64) (*FeeData, error) {
|
||||
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("estimateDynamicGas SuggestGasTipCap failure", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseFee := big.NewInt(0)
|
||||
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
|
||||
baseFee.SetUint64(feeGas)
|
||||
}
|
||||
gasFeeCap := new(big.Int).Add(
|
||||
gasTipCap,
|
||||
new(big.Int).Mul(baseFee, big.NewInt(2)),
|
||||
)
|
||||
gasLimit, err := s.estimateGasLimit(auth, contract, input, nil, gasTipCap, gasFeeCap, value)
|
||||
gasFeeCap := new(big.Int).Add(gasTipCap, new(big.Int).Mul(new(big.Int).SetUint64(baseFee), big.NewInt(2)))
|
||||
gasLimit, accessList, err := s.estimateGasLimit(to, data, nil, gasTipCap, gasFeeCap, value, true)
|
||||
if err != nil {
|
||||
log.Error("estimateDynamicGas estimateGasLimit failure",
|
||||
"from", auth.From.Hex(), "nonce", auth.Nonce.Uint64(), "contract address", contract.Hex(),
|
||||
"from", s.auth.From.String(), "nonce", s.auth.Nonce.Uint64(), "to address", to.String(),
|
||||
"fallback gas limit", fallbackGasLimit, "error", err)
|
||||
if fallbackGasLimit == 0 {
|
||||
return nil, err
|
||||
}
|
||||
gasLimit = fallbackGasLimit
|
||||
} else {
|
||||
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
|
||||
}
|
||||
return &FeeData{
|
||||
feeData := &FeeData{
|
||||
gasLimit: gasLimit,
|
||||
gasTipCap: gasTipCap,
|
||||
gasFeeCap: gasFeeCap,
|
||||
}, nil
|
||||
}
|
||||
if accessList != nil {
|
||||
feeData.accessList = *accessList
|
||||
}
|
||||
return feeData, nil
|
||||
}
|
||||
|
||||
func (s *Sender) estimateGasLimit(opts *bind.TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
|
||||
func (s *Sender) estimateGasLimit(to *common.Address, data []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int, useAccessList bool) (uint64, *types.AccessList, error) {
|
||||
msg := ethereum.CallMsg{
|
||||
From: opts.From,
|
||||
To: contract,
|
||||
From: s.auth.From,
|
||||
To: to,
|
||||
GasPrice: gasPrice,
|
||||
GasTipCap: gasTipCap,
|
||||
GasFeeCap: gasFeeCap,
|
||||
Value: value,
|
||||
Data: input,
|
||||
Data: data,
|
||||
}
|
||||
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
|
||||
gasLimitWithoutAccessList, err := s.client.EstimateGas(s.ctx, msg)
|
||||
if err != nil {
|
||||
log.Error("estimateGasLimit EstimateGas failure", "error", err)
|
||||
return 0, err
|
||||
log.Error("estimateGasLimit EstimateGas failure without access list", "error", err)
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
|
||||
if !useAccessList {
|
||||
return gasLimitWithoutAccessList, nil, nil
|
||||
}
|
||||
|
||||
return gasLimit, nil
|
||||
accessList, gasLimitWithAccessList, errStr, rpcErr := s.gethClient.CreateAccessList(s.ctx, msg)
|
||||
if rpcErr != nil {
|
||||
log.Error("CreateAccessList RPC error", "error", rpcErr)
|
||||
return gasLimitWithoutAccessList, nil, rpcErr
|
||||
}
|
||||
if errStr != "" {
|
||||
log.Error("CreateAccessList reported error", "error", errStr)
|
||||
return gasLimitWithoutAccessList, nil, fmt.Errorf(errStr)
|
||||
}
|
||||
|
||||
// Fine-tune accessList because 'to' address is automatically included in the access list by the Ethereum protocol: https://github.com/ethereum/go-ethereum/blob/v1.13.10/core/state/statedb.go#L1322
|
||||
// This function returns a gas estimation because GO SDK does not support access list: https://github.com/ethereum/go-ethereum/blob/v1.13.10/ethclient/ethclient.go#L642
|
||||
accessList, gasLimitWithAccessList = finetuneAccessList(accessList, gasLimitWithAccessList, msg.To)
|
||||
|
||||
log.Info("gas", "senderName", s.name, "senderService", s.service, "gasLimitWithAccessList", gasLimitWithAccessList, "gasLimitWithoutAccessList", gasLimitWithoutAccessList, "accessList", accessList)
|
||||
|
||||
if gasLimitWithAccessList < gasLimitWithoutAccessList {
|
||||
return gasLimitWithAccessList, accessList, nil
|
||||
}
|
||||
return gasLimitWithoutAccessList, nil, nil
|
||||
}
|
||||
|
||||
func finetuneAccessList(accessList *types.AccessList, gasLimitWithAccessList uint64, to *common.Address) (*types.AccessList, uint64) {
|
||||
if accessList == nil || to == nil {
|
||||
return accessList, gasLimitWithAccessList
|
||||
}
|
||||
|
||||
var newAccessList types.AccessList
|
||||
for _, entry := range *accessList {
|
||||
if entry.Address == *to && len(entry.StorageKeys) < 24 {
|
||||
// Based on: https://arxiv.org/pdf/2312.06574.pdf
|
||||
// We remove the address and respective storage keys as long as the number of storage keys < 24.
|
||||
// This removal helps in preventing double-counting of the 'to' address in access list calculations.
|
||||
gasLimitWithAccessList -= 2400
|
||||
// Each storage key saves 100 gas units.
|
||||
gasLimitWithAccessList += uint64(100 * len(entry.StorageKeys))
|
||||
} else {
|
||||
// Otherwise, keep the entry in the new access list.
|
||||
newAccessList = append(newAccessList, entry)
|
||||
}
|
||||
}
|
||||
return &newAccessList, gasLimitWithAccessList
|
||||
}
|
||||
|
||||
@@ -1,95 +0,0 @@
|
||||
package sender
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type senderMetrics struct {
|
||||
senderCheckBalancerTotal *prometheus.CounterVec
|
||||
senderCheckPendingTransactionTotal *prometheus.CounterVec
|
||||
sendTransactionTotal *prometheus.CounterVec
|
||||
sendTransactionFailureFullTx *prometheus.GaugeVec
|
||||
sendTransactionFailureRepeatTransaction *prometheus.CounterVec
|
||||
sendTransactionFailureGetFee *prometheus.CounterVec
|
||||
sendTransactionFailureSendTx *prometheus.CounterVec
|
||||
resubmitTransactionTotal *prometheus.CounterVec
|
||||
currentPendingTxsNum *prometheus.GaugeVec
|
||||
currentGasFeeCap *prometheus.GaugeVec
|
||||
currentGasTipCap *prometheus.GaugeVec
|
||||
currentGasPrice *prometheus.GaugeVec
|
||||
currentGasLimit *prometheus.GaugeVec
|
||||
currentNonce *prometheus.GaugeVec
|
||||
}
|
||||
|
||||
var (
|
||||
initSenderMetricOnce sync.Once
|
||||
sm *senderMetrics
|
||||
)
|
||||
|
||||
func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
|
||||
initSenderMetricOnce.Do(func() {
|
||||
sm = &senderMetrics{
|
||||
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_total",
|
||||
Help: "The total number of sending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_send_transaction_full_tx_failure_total",
|
||||
Help: "The total number of sending transaction failure for full size tx.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_repeat_transaction_failure_total",
|
||||
Help: "The total number of sending transaction failure for repeat transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_get_fee_failure_total",
|
||||
Help: "The total number of sending transaction failure for getting fee.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_send_tx_failure_total",
|
||||
Help: "The total number of sending transaction failure for sending tx.",
|
||||
}, []string{"service", "name"}),
|
||||
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_resubmit_send_transaction_total",
|
||||
Help: "The total number of resubmit transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_pending_tx_count",
|
||||
Help: "The pending tx count in the sender.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_fee_cap",
|
||||
Help: "The gas fee of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_tip_cap",
|
||||
Help: "The gas tip of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_price_cap",
|
||||
Help: "The gas price of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_limit",
|
||||
Help: "The gas limit of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_nonce",
|
||||
Help: "The nonce of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_check_pending_transaction_total",
|
||||
Help: "The total number of check pending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_check_balancer_total",
|
||||
Help: "The total number of check balancer.",
|
||||
}, []string{"service", "name"}),
|
||||
}
|
||||
})
|
||||
|
||||
return sm
|
||||
}
|
||||
@@ -1,24 +1,30 @@
|
||||
package sender
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
cmapV2 "github.com/orcaman/concurrent-map/v2"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient/gethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rlp"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
@@ -33,18 +39,12 @@ const (
|
||||
LegacyTxType = "LegacyTx"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoAvailableAccount indicates no available account error in the account pool.
|
||||
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
|
||||
// ErrFullPending sender's pending pool is full.
|
||||
ErrFullPending = errors.New("sender's pending pool is full")
|
||||
)
|
||||
|
||||
// Confirmation struct used to indicate transaction confirmation details
|
||||
type Confirmation struct {
|
||||
ID string
|
||||
ContextID string
|
||||
IsSuccessful bool
|
||||
TxHash common.Hash
|
||||
SenderType types.SenderType
|
||||
}
|
||||
|
||||
// FeeData fee struct used to estimate gas price
|
||||
@@ -53,49 +53,45 @@ type FeeData struct {
|
||||
gasTipCap *big.Int
|
||||
gasPrice *big.Int
|
||||
|
||||
gasLimit uint64
|
||||
}
|
||||
accessList gethTypes.AccessList
|
||||
|
||||
// PendingTransaction submitted but pending transactions
|
||||
type PendingTransaction struct {
|
||||
submitAt uint64
|
||||
id string
|
||||
feeData *FeeData
|
||||
signer *bind.TransactOpts
|
||||
tx *types.Transaction
|
||||
gasLimit uint64
|
||||
}
|
||||
|
||||
// Sender Transaction sender to send transaction to l1/l2 geth
|
||||
type Sender struct {
|
||||
config *config.SenderConfig
|
||||
client *ethclient.Client // The client to retrieve on chain data or send transaction.
|
||||
chainID *big.Int // The chain id of the endpoint
|
||||
ctx context.Context
|
||||
service string
|
||||
name string
|
||||
config *config.SenderConfig
|
||||
gethClient *gethclient.Client
|
||||
client *ethclient.Client // The client to retrieve on chain data or send transaction.
|
||||
chainID *big.Int // The chain id of the endpoint
|
||||
ctx context.Context
|
||||
service string
|
||||
name string
|
||||
senderType types.SenderType
|
||||
|
||||
auth *bind.TransactOpts
|
||||
minBalance *big.Int
|
||||
auth *bind.TransactOpts
|
||||
|
||||
blockNumber uint64 // Current block number on chain.
|
||||
baseFeePerGas uint64 // Current base fee per gas on chain
|
||||
pendingTxs cmapV2.ConcurrentMap[string, *PendingTransaction] // Mapping from nonce to pending transaction
|
||||
confirmCh chan *Confirmation
|
||||
db *gorm.DB
|
||||
pendingTransactionOrm *orm.PendingTransaction
|
||||
|
||||
stopCh chan struct{}
|
||||
confirmCh chan *Confirmation
|
||||
stopCh chan struct{}
|
||||
|
||||
metrics *senderMetrics
|
||||
}
|
||||
|
||||
// NewSender returns a new instance of transaction sender
|
||||
// txConfirmationCh is used to notify confirmed transaction
|
||||
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, reg prometheus.Registerer) (*Sender, error) {
|
||||
client, err := ethclient.Dial(config.Endpoint)
|
||||
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, senderType types.SenderType, db *gorm.DB, reg prometheus.Registerer) (*Sender, error) {
|
||||
if config.EscalateMultipleNum <= config.EscalateMultipleDen {
|
||||
return nil, fmt.Errorf("invalid params, EscalateMultipleNum; %v, EscalateMultipleDen: %v", config.EscalateMultipleNum, config.EscalateMultipleDen)
|
||||
}
|
||||
|
||||
rpcClient, err := rpc.Dial(config.Endpoint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
|
||||
}
|
||||
|
||||
// get chainID from client
|
||||
client := ethclient.NewClient(rpcClient)
|
||||
chainID, err := client.ChainID(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get chain ID, err: %w", err)
|
||||
@@ -113,35 +109,20 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
|
||||
}
|
||||
auth.Nonce = big.NewInt(int64(nonce))
|
||||
|
||||
// get header by number
|
||||
header, err := client.HeaderByNumber(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get header by number, err: %w", err)
|
||||
}
|
||||
|
||||
var baseFeePerGas uint64
|
||||
if config.TxType == DynamicFeeTxType {
|
||||
if header.BaseFee != nil {
|
||||
baseFeePerGas = header.BaseFee.Uint64()
|
||||
} else {
|
||||
return nil, errors.New("dynamic fee tx type not supported: header.BaseFee is nil")
|
||||
}
|
||||
}
|
||||
|
||||
sender := &Sender{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
client: client,
|
||||
chainID: chainID,
|
||||
auth: auth,
|
||||
minBalance: config.MinBalance,
|
||||
confirmCh: make(chan *Confirmation, 128),
|
||||
blockNumber: header.Number.Uint64(),
|
||||
baseFeePerGas: baseFeePerGas,
|
||||
pendingTxs: cmapV2.New[*PendingTransaction](),
|
||||
stopCh: make(chan struct{}),
|
||||
name: name,
|
||||
service: service,
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
gethClient: gethclient.New(rpcClient),
|
||||
client: client,
|
||||
chainID: chainID,
|
||||
auth: auth,
|
||||
db: db,
|
||||
pendingTransactionOrm: orm.NewPendingTransaction(db),
|
||||
confirmCh: make(chan *Confirmation, 128),
|
||||
stopCh: make(chan struct{}),
|
||||
name: name,
|
||||
service: service,
|
||||
senderType: senderType,
|
||||
}
|
||||
sender.metrics = initSenderMetrics(reg)
|
||||
|
||||
@@ -150,21 +131,6 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
|
||||
return sender, nil
|
||||
}
|
||||
|
||||
// PendingCount returns the current number of pending txs.
|
||||
func (s *Sender) PendingCount() int {
|
||||
return s.pendingTxs.Count()
|
||||
}
|
||||
|
||||
// PendingLimit returns the maximum number of pending txs the sender can handle.
|
||||
func (s *Sender) PendingLimit() int {
|
||||
return s.config.PendingLimit
|
||||
}
|
||||
|
||||
// IsFull returns true if the sender's pending tx pool is full.
|
||||
func (s *Sender) IsFull() bool {
|
||||
return s.pendingTxs.Count() >= s.config.PendingLimit
|
||||
}
|
||||
|
||||
// GetChainID returns the chain ID associated with the sender.
|
||||
func (s *Sender) GetChainID() *big.Int {
|
||||
return s.chainID
|
||||
@@ -173,7 +139,7 @@ func (s *Sender) GetChainID() *big.Int {
|
||||
// Stop stop the sender module.
|
||||
func (s *Sender) Stop() {
|
||||
close(s.stopCh)
|
||||
log.Info("Transaction sender stopped")
|
||||
log.Info("sender stopped", "name", s.name, "service", s.service, "address", s.auth.From.String())
|
||||
}
|
||||
|
||||
// ConfirmChan channel used to communicate with transaction sender
|
||||
@@ -187,67 +153,51 @@ func (s *Sender) SendConfirmation(cfm *Confirmation) {
|
||||
s.confirmCh <- cfm
|
||||
}
|
||||
|
||||
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
|
||||
func (s *Sender) getFeeData(target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64, baseFee uint64) (*FeeData, error) {
|
||||
if s.config.TxType == DynamicFeeTxType {
|
||||
return s.estimateDynamicGas(auth, target, value, data, fallbackGasLimit)
|
||||
return s.estimateDynamicGas(target, value, data, fallbackGasLimit, baseFee)
|
||||
}
|
||||
return s.estimateLegacyGas(auth, target, value, data, fallbackGasLimit)
|
||||
return s.estimateLegacyGas(target, value, data, fallbackGasLimit)
|
||||
}
|
||||
|
||||
// SendTransaction send a signed L2tL1 transaction.
|
||||
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (common.Hash, error) {
|
||||
func (s *Sender) SendTransaction(contextID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (common.Hash, error) {
|
||||
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
if s.IsFull() {
|
||||
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
|
||||
return common.Hash{}, ErrFullPending
|
||||
}
|
||||
|
||||
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(0)
|
||||
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
|
||||
s.metrics.sendTransactionFailureRepeatTransaction.WithLabelValues(s.service, s.name).Inc()
|
||||
return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
|
||||
}
|
||||
|
||||
var (
|
||||
feeData *FeeData
|
||||
tx *types.Transaction
|
||||
tx *gethTypes.Transaction
|
||||
err error
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
s.pendingTxs.Remove(ID) // release the ID on failure
|
||||
}
|
||||
}()
|
||||
blockNumber, baseFee, err := s.getBlockNumberAndBaseFee(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number and base fee", "error", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to get block number and base fee, err: %w", err)
|
||||
}
|
||||
|
||||
if feeData, err = s.getFeeData(s.auth, target, value, data, fallbackGasLimit); err != nil {
|
||||
if feeData, err = s.getFeeData(target, value, data, fallbackGasLimit, baseFee); err != nil {
|
||||
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
|
||||
log.Error("failed to get fee data", "from", s.auth.From.String(), "nonce", s.auth.Nonce.Uint64(), "fallback gas limit", fallbackGasLimit, "err", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
|
||||
}
|
||||
|
||||
if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
|
||||
if tx, err = s.createAndSendTx(feeData, target, value, data, nil); err != nil {
|
||||
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
|
||||
log.Error("failed to create and send tx (non-resubmit case)", "from", s.auth.From.String(), "nonce", s.auth.Nonce.Uint64(), "err", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
|
||||
}
|
||||
|
||||
// add pending transaction
|
||||
pending := &PendingTransaction{
|
||||
tx: tx,
|
||||
id: ID,
|
||||
signer: s.auth,
|
||||
submitAt: atomic.LoadUint64(&s.blockNumber),
|
||||
feeData: feeData,
|
||||
if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), tx, blockNumber); err != nil {
|
||||
log.Error("failed to insert transaction", "from", s.auth.From.String(), "nonce", s.auth.Nonce.Uint64(), "err", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to insert transaction, err: %w", err)
|
||||
}
|
||||
s.pendingTxs.Set(ID, pending)
|
||||
return tx.Hash(), nil
|
||||
}
|
||||
|
||||
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (*types.Transaction, error) {
|
||||
func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (*gethTypes.Transaction, error) {
|
||||
var (
|
||||
nonce = auth.Nonce.Uint64()
|
||||
txData types.TxData
|
||||
nonce = s.auth.Nonce.Uint64()
|
||||
txData gethTypes.TxData
|
||||
)
|
||||
|
||||
// this is a resubmit call, override the nonce
|
||||
@@ -255,11 +205,10 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
|
||||
nonce = *overrideNonce
|
||||
}
|
||||
|
||||
// lock here to avoit blocking when call `SuggestGasPrice`
|
||||
switch s.config.TxType {
|
||||
case LegacyTxType:
|
||||
// for ganache mock node
|
||||
txData = &types.LegacyTx{
|
||||
txData = &gethTypes.LegacyTx{
|
||||
Nonce: nonce,
|
||||
GasPrice: feeData.gasPrice,
|
||||
Gas: feeData.gasLimit,
|
||||
@@ -271,7 +220,7 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
|
||||
S: new(big.Int),
|
||||
}
|
||||
case AccessListTxType:
|
||||
txData = &types.AccessListTx{
|
||||
txData = &gethTypes.AccessListTx{
|
||||
ChainID: s.chainID,
|
||||
Nonce: nonce,
|
||||
GasPrice: feeData.gasPrice,
|
||||
@@ -279,18 +228,18 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
|
||||
To: target,
|
||||
Value: new(big.Int).Set(value),
|
||||
Data: common.CopyBytes(data),
|
||||
AccessList: make(types.AccessList, 0),
|
||||
AccessList: feeData.accessList,
|
||||
V: new(big.Int),
|
||||
R: new(big.Int),
|
||||
S: new(big.Int),
|
||||
}
|
||||
default:
|
||||
txData = &types.DynamicFeeTx{
|
||||
txData = &gethTypes.DynamicFeeTx{
|
||||
Nonce: nonce,
|
||||
To: target,
|
||||
Data: common.CopyBytes(data),
|
||||
Gas: feeData.gasLimit,
|
||||
AccessList: make(types.AccessList, 0),
|
||||
AccessList: feeData.accessList,
|
||||
Value: new(big.Int).Set(value),
|
||||
ChainID: s.chainID,
|
||||
GasTipCap: feeData.gasTipCap,
|
||||
@@ -302,13 +251,14 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
|
||||
}
|
||||
|
||||
// sign and send
|
||||
tx, err := auth.Signer(auth.From, types.NewTx(txData))
|
||||
tx, err := s.auth.Signer(s.auth.From, gethTypes.NewTx(txData))
|
||||
if err != nil {
|
||||
log.Error("failed to sign tx", "address", auth.From.String(), "err", err)
|
||||
log.Error("failed to sign tx", "address", s.auth.From.String(), "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = s.client.SendTransaction(s.ctx, tx); err != nil {
|
||||
log.Error("failed to send tx", "tx hash", tx.Hash().String(), "from", auth.From.String(), "nonce", tx.Nonce(), "err", err)
|
||||
log.Error("failed to send tx", "tx hash", tx.Hash().String(), "from", s.auth.From.String(), "nonce", tx.Nonce(), "err", err)
|
||||
// Check if contain nonce, and reset nonce
|
||||
// only reset nonce when it is not from resubmit
|
||||
if strings.Contains(err.Error(), "nonce") && overrideNonce == nil {
|
||||
@@ -333,12 +283,12 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
|
||||
|
||||
// update nonce when it is not from resubmit
|
||||
if overrideNonce == nil {
|
||||
auth.Nonce = big.NewInt(int64(nonce + 1))
|
||||
s.auth.Nonce = big.NewInt(int64(nonce + 1))
|
||||
}
|
||||
return tx, nil
|
||||
}
|
||||
|
||||
// reSetNonce reset nonce if send signed tx failed.
|
||||
// resetNonce reset nonce if send signed tx failed.
|
||||
func (s *Sender) resetNonce(ctx context.Context) {
|
||||
nonce, err := s.client.PendingNonceAt(ctx, s.auth.From)
|
||||
if err != nil {
|
||||
@@ -348,7 +298,7 @@ func (s *Sender) resetNonce(ctx context.Context) {
|
||||
s.auth.Nonce = big.NewInt(int64(nonce))
|
||||
}
|
||||
|
||||
func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {
|
||||
func (s *Sender) resubmitTransaction(auth *bind.TransactOpts, tx *gethTypes.Transaction, baseFee uint64) (*gethTypes.Transaction, error) {
|
||||
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
|
||||
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
|
||||
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
|
||||
@@ -360,14 +310,13 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
"nonce": tx.Nonce(),
|
||||
}
|
||||
|
||||
var feeData FeeData
|
||||
feeData.gasLimit = tx.Gas()
|
||||
switch s.config.TxType {
|
||||
case LegacyTxType, AccessListTxType: // `LegacyTxType`is for ganache mock node
|
||||
originalGasPrice := feeData.gasPrice
|
||||
gasPrice := escalateMultipleNum.Mul(escalateMultipleNum, big.NewInt(feeData.gasPrice.Int64()))
|
||||
originalGasPrice := tx.GasPrice()
|
||||
gasPrice := new(big.Int).Mul(escalateMultipleNum, originalGasPrice)
|
||||
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
|
||||
if gasPrice.Cmp(feeData.gasPrice) < 0 {
|
||||
gasPrice = feeData.gasPrice
|
||||
}
|
||||
if gasPrice.Cmp(maxGasPrice) > 0 {
|
||||
gasPrice = maxGasPrice
|
||||
}
|
||||
@@ -381,27 +330,16 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
txInfo["original_gas_price"] = originalGasPrice.Uint64()
|
||||
txInfo["adjusted_gas_price"] = gasPrice.Uint64()
|
||||
default:
|
||||
originalGasTipCap := big.NewInt(feeData.gasTipCap.Int64())
|
||||
originalGasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())
|
||||
originalGasTipCap := tx.GasTipCap()
|
||||
originalGasFeeCap := tx.GasFeeCap()
|
||||
|
||||
gasTipCap := big.NewInt(feeData.gasTipCap.Int64())
|
||||
gasTipCap = gasTipCap.Mul(gasTipCap, escalateMultipleNum)
|
||||
gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
|
||||
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
|
||||
gasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())
|
||||
gasFeeCap = gasFeeCap.Mul(gasFeeCap, escalateMultipleNum)
|
||||
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
|
||||
gasFeeCap = gasFeeCap.Div(gasFeeCap, escalateMultipleDen)
|
||||
if gasFeeCap.Cmp(feeData.gasFeeCap) < 0 {
|
||||
gasFeeCap = feeData.gasFeeCap
|
||||
}
|
||||
if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
|
||||
gasTipCap = feeData.gasTipCap
|
||||
}
|
||||
|
||||
// adjust for rising basefee
|
||||
adjBaseFee := big.NewInt(0)
|
||||
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
|
||||
adjBaseFee.SetUint64(feeGas)
|
||||
}
|
||||
adjBaseFee := new(big.Int).SetUint64(baseFee)
|
||||
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
|
||||
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
|
||||
currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee)
|
||||
@@ -414,6 +352,11 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
gasFeeCap = maxGasPrice
|
||||
}
|
||||
|
||||
// gasTipCap <= gasFeeCap
|
||||
if gasTipCap.Cmp(gasFeeCap) > 0 {
|
||||
gasTipCap = gasFeeCap
|
||||
}
|
||||
|
||||
if originalGasTipCap.Cmp(gasTipCap) == 0 {
|
||||
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
|
||||
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
|
||||
@@ -432,11 +375,11 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
|
||||
}
|
||||
|
||||
log.Info("Transaction gas adjustment details", txInfo)
|
||||
log.Info("Transaction gas adjustment details", "txInfo", txInfo)
|
||||
|
||||
nonce := tx.Nonce()
|
||||
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
tx, err := s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
|
||||
tx, err := s.createAndSendTx(&feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
|
||||
if err != nil {
|
||||
log.Error("failed to create and send tx (resubmit case)", "from", s.auth.From.String(), "nonce", nonce, "err", err)
|
||||
return nil, err
|
||||
@@ -446,137 +389,116 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
|
||||
// checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number.
|
||||
// If a transaction hasn't been confirmed after a certain number of blocks, it will be resubmitted with an increased gas price.
|
||||
func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64) {
|
||||
number := header.Number.Uint64()
|
||||
atomic.StoreUint64(&s.blockNumber, number)
|
||||
func (s *Sender) checkPendingTransaction() {
|
||||
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
|
||||
if s.config.TxType == DynamicFeeTxType {
|
||||
if header.BaseFee != nil {
|
||||
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
|
||||
} else {
|
||||
log.Error("DynamicFeeTxType not supported, header.BaseFee nil")
|
||||
}
|
||||
blockNumber, baseFee, err := s.getBlockNumberAndBaseFee(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number and base fee", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
for item := range s.pendingTxs.IterBuffered() {
|
||||
key, pending := item.Key, item.Val
|
||||
// ignore empty id, since we use empty id to occupy pending task
|
||||
if pending == nil {
|
||||
transactionsToCheck, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(s.ctx, s.senderType, 100)
|
||||
if err != nil {
|
||||
log.Error("failed to load pending transactions", "sender meta", s.getSenderMeta(), "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest confirmed block number", "confirmations", s.config.Confirmations, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, txnToCheck := range transactionsToCheck {
|
||||
tx := new(gethTypes.Transaction)
|
||||
if err := tx.DecodeRLP(rlp.NewStream(bytes.NewReader(txnToCheck.RLPEncoding), 0)); err != nil {
|
||||
log.Error("failed to decode RLP", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
|
||||
if (err == nil) && (receipt != nil) {
|
||||
receipt, err := s.client.TransactionReceipt(s.ctx, tx.Hash())
|
||||
if (err == nil) && (receipt != nil) { // tx confirmed.
|
||||
if receipt.BlockNumber.Uint64() <= confirmed {
|
||||
s.pendingTxs.Remove(key)
|
||||
err := s.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
// Update the status of the transaction to TxStatusConfirmed.
|
||||
if err := s.pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(s.ctx, tx.Hash(), types.TxStatusConfirmed, dbTX); err != nil {
|
||||
log.Error("failed to update transaction status by tx hash", "hash", tx.Hash().String(), "sender meta", s.getSenderMeta(), "from", s.auth.From.String(), "nonce", tx.Nonce(), "err", err)
|
||||
return err
|
||||
}
|
||||
// Update other transactions with the same nonce and sender address as failed.
|
||||
if err := s.pendingTransactionOrm.UpdateOtherTransactionsAsFailedByNonce(s.ctx, txnToCheck.SenderAddress, tx.Nonce(), tx.Hash(), dbTX); err != nil {
|
||||
log.Error("failed to update other transactions as failed by nonce", "senderAddress", txnToCheck.SenderAddress, "nonce", tx.Nonce(), "excludedTxHash", tx.Hash(), "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("db transaction failed", "err", err)
|
||||
}
|
||||
|
||||
// send confirm message
|
||||
s.confirmCh <- &Confirmation{
|
||||
ID: pending.id,
|
||||
IsSuccessful: receipt.Status == types.ReceiptStatusSuccessful,
|
||||
TxHash: pending.tx.Hash(),
|
||||
ContextID: txnToCheck.ContextID,
|
||||
IsSuccessful: receipt.Status == gethTypes.ReceiptStatusSuccessful,
|
||||
TxHash: tx.Hash(),
|
||||
SenderType: s.senderType,
|
||||
}
|
||||
}
|
||||
} else if s.config.EscalateBlocks+pending.submitAt < number {
|
||||
log.Info("resubmit transaction",
|
||||
"hash", pending.tx.Hash().String(),
|
||||
"from", pending.signer.From.String(),
|
||||
"nonce", pending.tx.Nonce(),
|
||||
"submit block number", pending.submitAt,
|
||||
"current block number", number,
|
||||
"configured escalateBlocks", s.config.EscalateBlocks)
|
||||
} else if txnToCheck.Status == types.TxStatusPending && // Only try resubmitting a new transaction based on gas price of the last transaction (status pending) with same ContextID.
|
||||
s.config.EscalateBlocks+txnToCheck.SubmitBlockNumber <= blockNumber {
|
||||
// It's possible that the pending transaction was marked as failed earlier in this loop (e.g., if one of its replacements has already been confirmed).
|
||||
// Therefore, we fetch the current transaction status again for accuracy before proceeding.
|
||||
status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(s.ctx, tx.Hash())
|
||||
if err != nil {
|
||||
log.Error("failed to get transaction status by tx hash", "hash", tx.Hash().String(), "err", err)
|
||||
return
|
||||
}
|
||||
if status == types.TxStatusConfirmedFailed {
|
||||
log.Warn("transaction already marked as failed, skipping resubmission", "hash", tx.Hash().String())
|
||||
return
|
||||
}
|
||||
|
||||
if tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx); err != nil {
|
||||
// If account pool is empty, it will try again in next loop.
|
||||
if !errors.Is(err, ErrNoAvailableAccount) {
|
||||
log.Error("failed to resubmit transaction, reset submitAt", "tx hash", pending.tx.Hash().String(), "err", err)
|
||||
}
|
||||
// This means one of the old transactions is confirmed
|
||||
// One scenario is
|
||||
// 1. Initially, we replace the tx three times and submit it to local node.
|
||||
// Currently, we keep the last tx hash in the memory.
|
||||
// 2. Other node packed the 2-nd tx or 3-rd tx, and the local node has received the block now.
|
||||
// 3. When we resubmit the 4-th tx, we got a nonce error.
|
||||
// 4. We need to check the status of 3-rd tx stored in our memory
|
||||
// 4.1 If the 3-rd tx is packed, we got a receipt and 3-nd is marked as confirmed.
|
||||
// 4.2 If the 2-nd tx is packed, we got nothing from `TransactionReceipt` call. Since we
|
||||
// cannot do anything about, we just log some information. In this case, the caller
|
||||
// of `sender.SendTransaction` should write extra code to handle the situation.
|
||||
// Another scenario is private key leaking and someone send a transaction with the same nonce.
|
||||
// We need to stop the program and manually handle the situation.
|
||||
if strings.Contains(err.Error(), "nonce") {
|
||||
// This key can be deleted
|
||||
s.pendingTxs.Remove(key)
|
||||
// Try get receipt by the latest replaced tx hash
|
||||
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
|
||||
if (err == nil) && (receipt != nil) {
|
||||
// send confirm message
|
||||
s.confirmCh <- &Confirmation{
|
||||
ID: pending.id,
|
||||
IsSuccessful: receipt.Status == types.ReceiptStatusSuccessful,
|
||||
TxHash: pending.tx.Hash(),
|
||||
}
|
||||
} else {
|
||||
// The receipt can be nil since the confirmed transaction may not be the latest one.
|
||||
// We just ignore it, the caller of the sender pool should handle this situation.
|
||||
log.Warn("Pending transaction is confirmed by one of the replaced transactions", "key", key, "signer", pending.signer.From, "nonce", pending.tx.Nonce())
|
||||
}
|
||||
}
|
||||
log.Info("resubmit transaction",
|
||||
"hash", tx.Hash().String(),
|
||||
"from", s.auth.From.String(),
|
||||
"nonce", tx.Nonce(),
|
||||
"submitBlockNumber", txnToCheck.SubmitBlockNumber,
|
||||
"currentBlockNumber", blockNumber,
|
||||
"escalateBlocks", s.config.EscalateBlocks)
|
||||
|
||||
if newTx, err := s.resubmitTransaction(s.auth, tx, baseFee); err != nil {
|
||||
s.metrics.resubmitTransactionFailedTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
log.Error("failed to resubmit transaction", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "from", s.auth.From.String(), "nonce", newTx.Nonce(), "err", err)
|
||||
} else {
|
||||
// flush submitAt
|
||||
pending.tx = tx
|
||||
pending.submitAt = number
|
||||
err := s.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
// Update the status of the original transaction as replaced, while still checking its confirmation status.
|
||||
if err := s.pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(s.ctx, tx.Hash(), types.TxStatusReplaced, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update status of transaction with hash %s to TxStatusReplaced, err: %w", tx.Hash().String(), err)
|
||||
}
|
||||
// Record the new transaction that has replaced the original one.
|
||||
if err := s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, txnToCheck.ContextID, s.getSenderMeta(), newTx, txnToCheck.SubmitBlockNumber, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to insert new pending transaction with context ID: %s, nonce: %d, hash: %v, err: %w", txnToCheck.ContextID, newTx.Nonce(), newTx.Hash().String(), err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("db transaction failed", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkBalance checks balance and print error log if balance is under a threshold.
|
||||
func (s *Sender) checkBalance(ctx context.Context) error {
|
||||
bls, err := s.client.BalanceAt(ctx, s.auth.From, nil)
|
||||
if err != nil {
|
||||
log.Warn("failed to get balance", "address", s.auth.From.String(), "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if bls.Cmp(s.minBalance) < 0 {
|
||||
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s, address: %s",
|
||||
bls.String(), s.minBalance.String(), s.auth.From.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Loop is the main event loop
|
||||
func (s *Sender) loop(ctx context.Context) {
|
||||
checkTick := time.NewTicker(time.Duration(s.config.CheckPendingTime) * time.Second)
|
||||
defer checkTick.Stop()
|
||||
|
||||
checkBalanceTicker := time.NewTicker(time.Duration(s.config.CheckBalanceTime) * time.Second)
|
||||
defer checkBalanceTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-checkTick.C:
|
||||
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
header, err := s.client.HeaderByNumber(s.ctx, nil)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest head", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest confirmed block number", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
s.checkPendingTransaction(header, confirmed)
|
||||
case <-checkBalanceTicker.C:
|
||||
s.metrics.senderCheckBalancerTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
// Check and set balance.
|
||||
if err := s.checkBalance(ctx); err != nil {
|
||||
log.Error("check balance error", "err", err)
|
||||
}
|
||||
s.checkPendingTransaction()
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-s.stopCh:
|
||||
@@ -584,3 +506,29 @@ func (s *Sender) loop(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Sender) getSenderMeta() *orm.SenderMeta {
|
||||
return &orm.SenderMeta{
|
||||
Name: s.name,
|
||||
Service: s.service,
|
||||
Address: s.auth.From,
|
||||
Type: s.senderType,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Sender) getBlockNumberAndBaseFee(ctx context.Context) (uint64, uint64, error) {
|
||||
header, err := s.client.HeaderByNumber(ctx, nil)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to get header by number, err: %w", err)
|
||||
}
|
||||
|
||||
var baseFeePerGas uint64
|
||||
if s.config.TxType == DynamicFeeTxType {
|
||||
if header.BaseFee != nil {
|
||||
baseFeePerGas = header.BaseFee.Uint64()
|
||||
} else {
|
||||
return 0, 0, errors.New("dynamic fee tx type not supported: header.BaseFee is nil")
|
||||
}
|
||||
}
|
||||
return header.Number.Uint64(), baseFeePerGas, nil
|
||||
}
|
||||
|
||||
75
rollup/internal/controller/sender/sender_metrics.go
Normal file
75
rollup/internal/controller/sender/sender_metrics.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package sender
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type senderMetrics struct {
|
||||
senderCheckPendingTransactionTotal *prometheus.CounterVec
|
||||
sendTransactionTotal *prometheus.CounterVec
|
||||
sendTransactionFailureGetFee *prometheus.CounterVec
|
||||
sendTransactionFailureSendTx *prometheus.CounterVec
|
||||
resubmitTransactionTotal *prometheus.CounterVec
|
||||
resubmitTransactionFailedTotal *prometheus.CounterVec
|
||||
currentGasFeeCap *prometheus.GaugeVec
|
||||
currentGasTipCap *prometheus.GaugeVec
|
||||
currentGasPrice *prometheus.GaugeVec
|
||||
currentGasLimit *prometheus.GaugeVec
|
||||
}
|
||||
|
||||
var (
|
||||
initSenderMetricOnce sync.Once
|
||||
sm *senderMetrics
|
||||
)
|
||||
|
||||
func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
|
||||
initSenderMetricOnce.Do(func() {
|
||||
sm = &senderMetrics{
|
||||
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_total",
|
||||
Help: "The total number of sending transactions.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_get_fee_failure_total",
|
||||
Help: "The total number of sending transactions failure for getting fee.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_send_tx_failure_total",
|
||||
Help: "The total number of sending transactions failure for sending tx.",
|
||||
}, []string{"service", "name"}),
|
||||
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_resubmit_send_transaction_total",
|
||||
Help: "The total number of resubmit transactions.",
|
||||
}, []string{"service", "name"}),
|
||||
resubmitTransactionFailedTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_send_transaction_resubmit_send_transaction_failed_total",
|
||||
Help: "The total number of failed resubmit transactions.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_fee_cap",
|
||||
Help: "The gas fee cap of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_tip_cap",
|
||||
Help: "The gas tip cap of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_price_cap",
|
||||
Help: "The gas price of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "rollup_sender_gas_limit",
|
||||
Help: "The gas limit of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "rollup_sender_check_pending_transaction_total",
|
||||
Help: "The total number of check pending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
}
|
||||
})
|
||||
|
||||
return sm
|
||||
}
|
||||
@@ -4,31 +4,41 @@ import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/mock_bridge"
|
||||
)
|
||||
|
||||
const TXBatch = 50
|
||||
|
||||
var (
|
||||
privateKey *ecdsa.PrivateKey
|
||||
cfg *config.Config
|
||||
base *docker.App
|
||||
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
|
||||
privateKey *ecdsa.PrivateKey
|
||||
cfg *config.Config
|
||||
base *docker.App
|
||||
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
|
||||
db *gorm.DB
|
||||
mockL1ContractsAddress common.Address
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
@@ -40,17 +50,45 @@ func TestMain(m *testing.M) {
|
||||
}
|
||||
|
||||
func setupEnv(t *testing.T) {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
var err error
|
||||
cfg, err = config.NewConfig("../../../conf/config.json")
|
||||
assert.NoError(t, err)
|
||||
base.RunL1Geth(t)
|
||||
priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212")
|
||||
assert.NoError(t, err)
|
||||
// Load default private key.
|
||||
privateKey = priv
|
||||
|
||||
base.RunL1Geth(t)
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.CheckBalanceTime = 1
|
||||
|
||||
base.RunDBImage(t)
|
||||
db, err = database.InitDB(
|
||||
&database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, base.L1gethImg.ChainID())
|
||||
assert.NoError(t, err)
|
||||
|
||||
l1Client, err := base.L1Client()
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, tx, _, err := mock_bridge.DeployMockBridgeL1(auth, l1Client)
|
||||
assert.NoError(t, err)
|
||||
|
||||
mockL1ContractsAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestSender(t *testing.T) {
|
||||
@@ -58,21 +96,28 @@ func TestSender(t *testing.T) {
|
||||
setupEnv(t)
|
||||
|
||||
t.Run("test new sender", testNewSender)
|
||||
t.Run("test pending limit", testPendLimit)
|
||||
t.Run("test fallback gas limit", testFallbackGasLimit)
|
||||
t.Run("test send and retrieve transaction", testSendAndRetrieveTransaction)
|
||||
t.Run("test access list transaction gas limit", testAccessListTransactionGasLimit)
|
||||
t.Run("test resubmit zero gas price transaction", testResubmitZeroGasPriceTransaction)
|
||||
t.Run("test resubmit non-zero gas price transaction", testResubmitNonZeroGasPriceTransaction)
|
||||
t.Run("test resubmit under priced transaction", testResubmitUnderpricedTransaction)
|
||||
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
|
||||
t.Run("test check pending transaction", testCheckPendingTransaction)
|
||||
t.Run("test check pending transaction tx confirmed", testCheckPendingTransactionTxConfirmed)
|
||||
t.Run("test check pending transaction resubmit tx confirmed", testCheckPendingTransactionResubmitTxConfirmed)
|
||||
t.Run("test check pending transaction replaced tx confirmed", testCheckPendingTransactionReplacedTxConfirmed)
|
||||
}
|
||||
|
||||
func testNewSender(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
// exit by Stop()
|
||||
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy1.TxType = txType
|
||||
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", nil)
|
||||
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
newSender1.Stop()
|
||||
|
||||
@@ -80,43 +125,57 @@ func testNewSender(t *testing.T) {
|
||||
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy2.TxType = txType
|
||||
subCtx, cancel := context.WithCancel(context.Background())
|
||||
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", nil)
|
||||
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
func testPendLimit(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
func testSendAndRetrieveTransaction(t *testing.T) {
|
||||
for i, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
cfgCopy.PendingLimit = 2
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 2*newSender.PendingLimit(); i++ {
|
||||
_, err = newSender.SendTransaction(strconv.Itoa(i), &common.Address{}, big.NewInt(1), nil, 0)
|
||||
assert.True(t, err == nil || (err != nil && err.Error() == "sender's pending pool is full"))
|
||||
}
|
||||
assert.True(t, newSender.PendingCount() <= newSender.PendingLimit())
|
||||
newSender.Stop()
|
||||
hash, err := s.SendTransaction("0", &common.Address{}, big.NewInt(0), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 1)
|
||||
assert.Equal(t, "0", txs[0].ContextID)
|
||||
assert.Equal(t, hash.String(), txs[0].Hash)
|
||||
assert.Equal(t, uint8(i), txs[0].Type)
|
||||
assert.Equal(t, types.TxStatusPending, txs[0].Status)
|
||||
assert.Equal(t, "0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63", txs[0].SenderAddress)
|
||||
assert.Equal(t, types.SenderTypeUnknown, txs[0].SenderType)
|
||||
assert.Equal(t, "test", txs[0].SenderService)
|
||||
assert.Equal(t, "test", txs[0].SenderName)
|
||||
s.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func testFallbackGasLimit(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
client, err := ethclient.Dial(cfgCopy.Endpoint)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// FallbackGasLimit = 0
|
||||
txHash0, err := s.SendTransaction("0", &common.Address{}, big.NewInt(1), nil, 0)
|
||||
txHash0, err := s.SendTransaction("0", &common.Address{}, big.NewInt(0), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
|
||||
assert.NoError(t, err)
|
||||
@@ -124,12 +183,12 @@ func testFallbackGasLimit(t *testing.T) {
|
||||
|
||||
// FallbackGasLimit = 100000
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(s, "estimateGasLimit",
|
||||
func(opts *bind.TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
|
||||
return 0, errors.New("estimateGasLimit error")
|
||||
func(contract *common.Address, data []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, *gethTypes.AccessList, error) {
|
||||
return 0, nil, errors.New("estimateGasLimit error")
|
||||
},
|
||||
)
|
||||
|
||||
txHash1, err := s.SendTransaction("1", &common.Address{}, big.NewInt(1), nil, 100000)
|
||||
txHash1, err := s.SendTransaction("1", &common.Address{}, big.NewInt(0), nil, 100000)
|
||||
assert.NoError(t, err)
|
||||
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
|
||||
assert.NoError(t, err)
|
||||
@@ -142,9 +201,13 @@ func testFallbackGasLimit(t *testing.T) {
|
||||
|
||||
func testResubmitZeroGasPriceTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
feeData := &FeeData{
|
||||
gasPrice: big.NewInt(0),
|
||||
@@ -152,24 +215,59 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
|
||||
gasFeeCap: big.NewInt(0),
|
||||
gasLimit: 50000,
|
||||
}
|
||||
tx, err := s.createAndSendTx(s.auth, feeData, &common.Address{}, big.NewInt(0), nil, nil)
|
||||
tx, err := s.createAndSendTx(feeData, &common.Address{}, big.NewInt(0), nil, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, tx)
|
||||
// Increase at least 1 wei in gas price, gas tip cap and gas fee cap.
|
||||
_, err = s.resubmitTransaction(feeData, s.auth, tx)
|
||||
_, err = s.resubmitTransaction(s.auth, tx, 0)
|
||||
assert.NoError(t, err)
|
||||
s.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func testAccessListTransactionGasLimit(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2GasOracleABI, err := bridgeAbi.L2GasPriceOracleMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
|
||||
data, err := l2GasOracleABI.Pack("setL2BaseFee", big.NewInt(2333))
|
||||
assert.NoError(t, err)
|
||||
|
||||
gasLimit, accessList, err := s.estimateGasLimit(&mockL1ContractsAddress, data, big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(0), true)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(43472), gasLimit)
|
||||
assert.NotNil(t, accessList)
|
||||
|
||||
gasLimit, accessList, err = s.estimateGasLimit(&mockL1ContractsAddress, data, big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(0), false)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(43949), gasLimit)
|
||||
assert.Nil(t, accessList)
|
||||
|
||||
s.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
// Bump gas price, gas tip cap and gas fee cap just touch the minimum threshold of 10% (default config of geth).
|
||||
cfgCopy.EscalateMultipleNum = 110
|
||||
cfgCopy.EscalateMultipleDen = 100
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
feeData := &FeeData{
|
||||
gasPrice: big.NewInt(100000),
|
||||
@@ -177,10 +275,10 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
|
||||
gasFeeCap: big.NewInt(100000),
|
||||
gasLimit: 50000,
|
||||
}
|
||||
tx, err := s.createAndSendTx(s.auth, feeData, &common.Address{}, big.NewInt(0), nil, nil)
|
||||
tx, err := s.createAndSendTx(feeData, &common.Address{}, big.NewInt(0), nil, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, tx)
|
||||
_, err = s.resubmitTransaction(feeData, s.auth, tx)
|
||||
_, err = s.resubmitTransaction(s.auth, tx, 0)
|
||||
assert.NoError(t, err)
|
||||
s.Stop()
|
||||
}
|
||||
@@ -188,12 +286,16 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
|
||||
|
||||
func testResubmitUnderpricedTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
// Bump gas price, gas tip cap and gas fee cap less than 10% (default config of geth).
|
||||
cfgCopy.EscalateMultipleNum = 109
|
||||
cfgCopy.EscalateMultipleDen = 100
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
feeData := &FeeData{
|
||||
gasPrice: big.NewInt(100000),
|
||||
@@ -201,30 +303,32 @@ func testResubmitUnderpricedTransaction(t *testing.T) {
|
||||
gasFeeCap: big.NewInt(100000),
|
||||
gasLimit: 50000,
|
||||
}
|
||||
tx, err := s.createAndSendTx(s.auth, feeData, &common.Address{}, big.NewInt(0), nil, nil)
|
||||
tx, err := s.createAndSendTx(feeData, &common.Address{}, big.NewInt(0), nil, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, tx)
|
||||
_, err = s.resubmitTransaction(feeData, s.auth, tx)
|
||||
_, err = s.resubmitTransaction(s.auth, tx, 0)
|
||||
assert.Error(t, err, "replacement transaction underpriced")
|
||||
s.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
|
||||
txType := "DynamicFeeTx"
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
txType := "DynamicFeeTx"
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
s.baseFeePerGas = 1000
|
||||
feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
|
||||
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
tx := gethTypes.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 21000, big.NewInt(0), nil)
|
||||
baseFeePerGas := uint64(1000)
|
||||
// bump the basefee by 10x
|
||||
s.baseFeePerGas *= 10
|
||||
baseFeePerGas *= 10
|
||||
// resubmit and check that the gas fee has been adjusted accordingly
|
||||
newTx, err := s.resubmitTransaction(feeData, s.auth, tx)
|
||||
newTx, err := s.resubmitTransaction(s.auth, tx, baseFeePerGas)
|
||||
assert.NoError(t, err)
|
||||
|
||||
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
|
||||
@@ -232,104 +336,171 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
|
||||
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
|
||||
|
||||
adjBaseFee := new(big.Int)
|
||||
adjBaseFee.SetUint64(s.baseFeePerGas)
|
||||
adjBaseFee.SetUint64(baseFeePerGas)
|
||||
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
|
||||
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
|
||||
|
||||
expectedGasFeeCap := new(big.Int).Add(feeData.gasTipCap, adjBaseFee)
|
||||
expectedGasFeeCap := new(big.Int).Add(tx.GasTipCap(), adjBaseFee)
|
||||
if expectedGasFeeCap.Cmp(maxGasPrice) > 0 {
|
||||
expectedGasFeeCap = maxGasPrice
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedGasFeeCap.Int64(), newTx.GasFeeCap().Int64())
|
||||
|
||||
s.Stop()
|
||||
}
|
||||
|
||||
func testCheckPendingTransaction(t *testing.T) {
|
||||
func testCheckPendingTransactionTxConfirmed(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}
|
||||
confirmed := uint64(100)
|
||||
receipt := &types.Receipt{Status: types.ReceiptStatusSuccessful, BlockNumber: big.NewInt(90)}
|
||||
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
_, err = s.SendTransaction("test", &common.Address{}, big.NewInt(0), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
receipt *types.Receipt
|
||||
receiptErr error
|
||||
resubmitErr error
|
||||
expectedCount int
|
||||
expectedFound bool
|
||||
}{
|
||||
{
|
||||
name: "Normal case, transaction receipt exists and successful",
|
||||
receipt: receipt,
|
||||
receiptErr: nil,
|
||||
resubmitErr: nil,
|
||||
expectedCount: 0,
|
||||
expectedFound: false,
|
||||
},
|
||||
{
|
||||
name: "Resubmit case, resubmitTransaction error (not nonce) case",
|
||||
receipt: receipt,
|
||||
receiptErr: errors.New("receipt error"),
|
||||
resubmitErr: errors.New("resubmit error"),
|
||||
expectedCount: 1,
|
||||
expectedFound: true,
|
||||
},
|
||||
{
|
||||
name: "Resubmit case, resubmitTransaction success case",
|
||||
receipt: receipt,
|
||||
receiptErr: errors.New("receipt error"),
|
||||
resubmitErr: nil,
|
||||
expectedCount: 1,
|
||||
expectedFound: true,
|
||||
},
|
||||
}
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 1)
|
||||
assert.Equal(t, types.TxStatusPending, txs[0].Status)
|
||||
assert.Equal(t, types.SenderTypeCommitBatch, txs[0].SenderType)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var c *ethclient.Client
|
||||
patchGuard := gomonkey.ApplyMethodFunc(c, "TransactionReceipt", func(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
|
||||
return tc.receipt, tc.receiptErr
|
||||
})
|
||||
patchGuard.ApplyPrivateMethod(s, "resubmitTransaction",
|
||||
func(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {
|
||||
return tx, tc.resubmitErr
|
||||
},
|
||||
)
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s.client, "TransactionReceipt", func(_ context.Context, hash common.Hash) (*gethTypes.Receipt, error) {
|
||||
return &gethTypes.Receipt{TxHash: hash, BlockNumber: big.NewInt(0), Status: gethTypes.ReceiptStatusSuccessful}, nil
|
||||
})
|
||||
|
||||
pendingTx := &PendingTransaction{id: "abc", tx: tx, signer: s.auth, submitAt: header.Number.Uint64() - s.config.EscalateBlocks - 1}
|
||||
s.pendingTxs.Set(pendingTx.id, pendingTx)
|
||||
s.checkPendingTransaction(header, confirmed)
|
||||
s.checkPendingTransaction()
|
||||
assert.NoError(t, err)
|
||||
|
||||
if tc.receiptErr == nil {
|
||||
expectedConfirmation := &Confirmation{
|
||||
ID: pendingTx.id,
|
||||
IsSuccessful: tc.receipt.Status == types.ReceiptStatusSuccessful,
|
||||
TxHash: pendingTx.tx.Hash(),
|
||||
}
|
||||
actualConfirmation := <-s.confirmCh
|
||||
assert.Equal(t, expectedConfirmation, actualConfirmation)
|
||||
}
|
||||
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 0)
|
||||
|
||||
if tc.expectedFound && tc.resubmitErr == nil {
|
||||
actualPendingTx, found := s.pendingTxs.Get(pendingTx.id)
|
||||
assert.Equal(t, true, found)
|
||||
assert.Equal(t, header.Number.Uint64(), actualPendingTx.submitAt)
|
||||
}
|
||||
|
||||
_, found := s.pendingTxs.Get(pendingTx.id)
|
||||
assert.Equal(t, tc.expectedFound, found)
|
||||
assert.Equal(t, tc.expectedCount, s.pendingTxs.Count())
|
||||
patchGuard.Reset()
|
||||
})
|
||||
}
|
||||
s.Stop()
|
||||
patchGuard.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.EscalateBlocks = 0
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeFinalizeBatch, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
originTxHash, err := s.SendTransaction("test", &common.Address{}, big.NewInt(0), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 1)
|
||||
assert.Equal(t, types.TxStatusPending, txs[0].Status)
|
||||
assert.Equal(t, types.SenderTypeFinalizeBatch, txs[0].SenderType)
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s.client, "TransactionReceipt", func(_ context.Context, hash common.Hash) (*gethTypes.Receipt, error) {
|
||||
if hash == originTxHash {
|
||||
return nil, fmt.Errorf("simulated transaction receipt error")
|
||||
}
|
||||
return &gethTypes.Receipt{TxHash: hash, BlockNumber: big.NewInt(0), Status: gethTypes.ReceiptStatusSuccessful}, nil
|
||||
})
|
||||
|
||||
// Attempt to resubmit the transaction.
|
||||
s.checkPendingTransaction()
|
||||
assert.NoError(t, err)
|
||||
|
||||
status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), originTxHash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.TxStatusReplaced, status)
|
||||
|
||||
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 2)
|
||||
assert.Equal(t, types.TxStatusReplaced, txs[0].Status)
|
||||
assert.Equal(t, types.TxStatusPending, txs[1].Status)
|
||||
|
||||
// Check the pending transactions again after attempting to resubmit.
|
||||
s.checkPendingTransaction()
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 0)
|
||||
|
||||
s.Stop()
|
||||
patchGuard.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.EscalateBlocks = 0
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txHash, err := s.SendTransaction("test", &common.Address{}, big.NewInt(0), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 1)
|
||||
assert.Equal(t, types.TxStatusPending, txs[0].Status)
|
||||
assert.Equal(t, types.SenderTypeL1GasOracle, txs[0].SenderType)
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s.client, "TransactionReceipt", func(_ context.Context, hash common.Hash) (*gethTypes.Receipt, error) {
|
||||
var status types.TxStatus
|
||||
status, err = s.pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), hash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get transaction status, hash: %s, err: %w", hash.String(), err)
|
||||
}
|
||||
// If the transaction status is 'replaced', return a successful receipt.
|
||||
if status == types.TxStatusReplaced {
|
||||
return &gethTypes.Receipt{
|
||||
TxHash: hash,
|
||||
BlockNumber: big.NewInt(0),
|
||||
Status: gethTypes.ReceiptStatusSuccessful,
|
||||
}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("simulated transaction receipt error")
|
||||
})
|
||||
|
||||
// Attempt to resubmit the transaction.
|
||||
s.checkPendingTransaction()
|
||||
assert.NoError(t, err)
|
||||
|
||||
status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), txHash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.TxStatusReplaced, status)
|
||||
|
||||
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 2)
|
||||
assert.Equal(t, types.TxStatusReplaced, txs[0].Status)
|
||||
assert.Equal(t, types.TxStatusPending, txs[1].Status)
|
||||
|
||||
// Check the pending transactions again after attempting to resubmit.
|
||||
s.checkPendingTransaction()
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 0)
|
||||
|
||||
s.Stop()
|
||||
patchGuard.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
cutils "scroll-tech/common/utils"
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
@@ -51,7 +52,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
|
||||
l1cfg := cfg.L1Config
|
||||
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.GasOracleSenderPrivateKey, "test", "test", nil)
|
||||
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.GasOracleSenderPrivateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create several transactions and commit to block
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -33,6 +34,10 @@ var (
|
||||
)
|
||||
|
||||
func setupEnv(t *testing.T) (err error) {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
// Load config.
|
||||
cfg, err = config.NewConfig("../../../conf/config.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -3,27 +3,29 @@ package orm
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
)
|
||||
|
||||
var (
|
||||
base *docker.App
|
||||
|
||||
db *gorm.DB
|
||||
l2BlockOrm *L2Block
|
||||
chunkOrm *Chunk
|
||||
batchOrm *Batch
|
||||
db *gorm.DB
|
||||
l2BlockOrm *L2Block
|
||||
chunkOrm *Chunk
|
||||
batchOrm *Batch
|
||||
pendingTransactionOrm *PendingTransaction
|
||||
|
||||
wrappedBlock1 *types.WrappedBlock
|
||||
wrappedBlock2 *types.WrappedBlock
|
||||
@@ -60,6 +62,7 @@ func setupEnv(t *testing.T) {
|
||||
batchOrm = NewBatch(db)
|
||||
chunkOrm = NewChunk(db)
|
||||
l2BlockOrm = NewL2Block(db)
|
||||
pendingTransactionOrm = NewPendingTransaction(db)
|
||||
|
||||
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
@@ -310,3 +313,85 @@ func TestBatchOrm(t *testing.T) {
|
||||
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
|
||||
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
|
||||
}
|
||||
|
||||
func TestTransactionOrm(t *testing.T) {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
tx0 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
|
||||
Nonce: 0,
|
||||
To: &common.Address{},
|
||||
Data: []byte{},
|
||||
Gas: 21000,
|
||||
AccessList: gethTypes.AccessList{},
|
||||
Value: big.NewInt(0),
|
||||
ChainID: big.NewInt(1),
|
||||
GasTipCap: big.NewInt(0),
|
||||
GasFeeCap: big.NewInt(1),
|
||||
V: big.NewInt(0),
|
||||
R: big.NewInt(0),
|
||||
S: big.NewInt(0),
|
||||
})
|
||||
tx1 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
|
||||
Nonce: 0,
|
||||
To: &common.Address{},
|
||||
Data: []byte{},
|
||||
Gas: 42000,
|
||||
AccessList: gethTypes.AccessList{},
|
||||
Value: big.NewInt(0),
|
||||
ChainID: big.NewInt(1),
|
||||
GasTipCap: big.NewInt(1),
|
||||
GasFeeCap: big.NewInt(2),
|
||||
V: big.NewInt(0),
|
||||
R: big.NewInt(0),
|
||||
S: big.NewInt(0),
|
||||
})
|
||||
senderMeta := &SenderMeta{
|
||||
Name: "testName",
|
||||
Service: "testService",
|
||||
Address: common.HexToAddress("0x1"),
|
||||
Type: types.SenderTypeCommitBatch,
|
||||
}
|
||||
|
||||
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx0, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx1, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(context.Background(), tx0.Hash(), types.TxStatusReplaced)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err := pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 2)
|
||||
assert.Equal(t, tx1.Type(), txs[1].Type)
|
||||
assert.Equal(t, tx1.Nonce(), txs[1].Nonce)
|
||||
assert.Equal(t, tx1.Gas(), txs[1].GasLimit)
|
||||
assert.Equal(t, tx1.GasTipCap().Uint64(), txs[1].GasTipCap)
|
||||
assert.Equal(t, tx1.GasFeeCap().Uint64(), txs[1].GasFeeCap)
|
||||
assert.Equal(t, tx1.ChainId().Uint64(), txs[1].ChainID)
|
||||
assert.Equal(t, senderMeta.Name, txs[1].SenderName)
|
||||
assert.Equal(t, senderMeta.Service, txs[1].SenderService)
|
||||
assert.Equal(t, senderMeta.Address.String(), txs[1].SenderAddress)
|
||||
assert.Equal(t, senderMeta.Type, txs[1].SenderType)
|
||||
|
||||
err = pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(context.Background(), tx1.Hash(), types.TxStatusConfirmed)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err = pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 1)
|
||||
|
||||
err = pendingTransactionOrm.UpdateOtherTransactionsAsFailedByNonce(context.Background(), senderMeta.Address.String(), tx1.Nonce(), tx1.Hash())
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err = pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, txs, 0)
|
||||
|
||||
status, err := pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), tx0.Hash())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.TxStatusConfirmedFailed, status)
|
||||
}
|
||||
|
||||
155
rollup/internal/orm/pending_transaction.go
Normal file
155
rollup/internal/orm/pending_transaction.go
Normal file
@@ -0,0 +1,155 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
)
|
||||
|
||||
// SenderMeta holds the metadata for a transaction sender including the name, service, address and type.
|
||||
type SenderMeta struct {
|
||||
Name string
|
||||
Service string
|
||||
Address common.Address
|
||||
Type types.SenderType
|
||||
}
|
||||
|
||||
// PendingTransaction represents the structure of a transaction in the database.
|
||||
type PendingTransaction struct {
|
||||
db *gorm.DB `gorm:"column:-"`
|
||||
|
||||
ID uint `json:"id" gorm:"id;primaryKey"`
|
||||
ContextID string `json:"context_id" gorm:"context_id"`
|
||||
Hash string `json:"hash" gorm:"hash"`
|
||||
ChainID uint64 `json:"chain_id" gorm:"chain_id"`
|
||||
Type uint8 `json:"type" gorm:"type"`
|
||||
GasTipCap uint64 `json:"gas_tip_cap" gorm:"gas_tip_cap"`
|
||||
GasFeeCap uint64 `json:"gas_fee_cap" gorm:"gas_fee_cap"`
|
||||
GasLimit uint64 `json:"gas_limit" gorm:"gas_limit"`
|
||||
Nonce uint64 `json:"nonce" gorm:"nonce"`
|
||||
SubmitBlockNumber uint64 `json:"submit_block_number" gorm:"submit_block_number"`
|
||||
Status types.TxStatus `json:"status" gorm:"status"`
|
||||
RLPEncoding []byte `json:"rlp_encoding" gorm:"rlp_encoding"`
|
||||
SenderName string `json:"sender_name" gorm:"sender_name"`
|
||||
SenderService string `json:"sender_service" gorm:"sender_service"`
|
||||
SenderAddress string `json:"sender_address" gorm:"sender_address"`
|
||||
SenderType types.SenderType `json:"sender_type" gorm:"sender_type"`
|
||||
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
|
||||
}
|
||||
|
||||
// TableName returns the table name for the Transaction model.
|
||||
func (*PendingTransaction) TableName() string {
|
||||
return "pending_transaction"
|
||||
}
|
||||
|
||||
// NewPendingTransaction returns a new instance of PendingTransaction.
|
||||
func NewPendingTransaction(db *gorm.DB) *PendingTransaction {
|
||||
return &PendingTransaction{db: db}
|
||||
}
|
||||
|
||||
// GetTxStatusByTxHash retrieves the status of a transaction by its hash.
|
||||
func (o *PendingTransaction) GetTxStatusByTxHash(ctx context.Context, hash common.Hash) (types.TxStatus, error) {
|
||||
var status types.TxStatus
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&PendingTransaction{})
|
||||
db = db.Select("status")
|
||||
db = db.Where("hash = ?", hash.String())
|
||||
if err := db.Scan(&status).Error; err != nil {
|
||||
return types.TxStatusUnknown, fmt.Errorf("failed to get tx status by hash, hash: %v, err: %w", hash, err)
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// GetPendingOrReplacedTransactionsBySenderType retrieves pending or replaced transactions filtered by sender type, ordered by nonce, then gas_fee_cap (gas_price in legacy tx), and limited to a specified count.
|
||||
func (o *PendingTransaction) GetPendingOrReplacedTransactionsBySenderType(ctx context.Context, senderType types.SenderType, limit int) ([]PendingTransaction, error) {
|
||||
var transactions []PendingTransaction
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&PendingTransaction{})
|
||||
db = db.Where("sender_type = ?", senderType)
|
||||
db = db.Where("status = ? OR status = ?", types.TxStatusPending, types.TxStatusReplaced)
|
||||
db = db.Order("nonce asc")
|
||||
db = db.Order("gas_fee_cap asc")
|
||||
db = db.Limit(limit)
|
||||
if err := db.Find(&transactions).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to get pending or replaced transactions by sender type, error: %w", err)
|
||||
}
|
||||
return transactions, nil
|
||||
}
|
||||
|
||||
// InsertPendingTransaction creates a new pending transaction record and stores it in the database.
|
||||
func (o *PendingTransaction) InsertPendingTransaction(ctx context.Context, contextID string, senderMeta *SenderMeta, tx *gethTypes.Transaction, submitBlockNumber uint64, dbTX ...*gorm.DB) error {
|
||||
rlp := new(bytes.Buffer)
|
||||
if err := tx.EncodeRLP(rlp); err != nil {
|
||||
return fmt.Errorf("failed to encode rlp, err: %w", err)
|
||||
}
|
||||
|
||||
newTransaction := &PendingTransaction{
|
||||
ContextID: contextID,
|
||||
Hash: tx.Hash().String(),
|
||||
Type: tx.Type(),
|
||||
ChainID: tx.ChainId().Uint64(),
|
||||
GasFeeCap: tx.GasFeeCap().Uint64(),
|
||||
GasTipCap: tx.GasTipCap().Uint64(),
|
||||
GasLimit: tx.Gas(),
|
||||
Nonce: tx.Nonce(),
|
||||
SubmitBlockNumber: submitBlockNumber,
|
||||
Status: types.TxStatusPending,
|
||||
RLPEncoding: rlp.Bytes(),
|
||||
SenderName: senderMeta.Name,
|
||||
SenderAddress: senderMeta.Address.String(),
|
||||
SenderService: senderMeta.Service,
|
||||
SenderType: senderMeta.Type,
|
||||
}
|
||||
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&PendingTransaction{})
|
||||
if err := db.Create(newTransaction).Error; err != nil {
|
||||
return fmt.Errorf("failed to InsertTransaction, error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePendingTransactionStatusByTxHash updates the status of a transaction based on the transaction hash.
|
||||
func (o *PendingTransaction) UpdatePendingTransactionStatusByTxHash(ctx context.Context, hash common.Hash, status types.TxStatus, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&PendingTransaction{})
|
||||
db = db.Where("hash = ?", hash.String())
|
||||
if err := db.Update("status", status).Error; err != nil {
|
||||
return fmt.Errorf("failed to UpdatePendingTransactionStatusByTxHash, txHash: %s, error: %w", hash, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateOtherTransactionsAsFailedByNonce updates the status of all transactions to TxStatusConfirmedFailed for a specific nonce and sender address, excluding a specified transaction hash.
|
||||
func (o *PendingTransaction) UpdateOtherTransactionsAsFailedByNonce(ctx context.Context, senderAddress string, nonce uint64, hash common.Hash, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&PendingTransaction{})
|
||||
db = db.Where("sender_address = ?", senderAddress)
|
||||
db = db.Where("nonce = ?", nonce)
|
||||
db = db.Where("hash != ?", hash.String())
|
||||
if err := db.Update("status", types.TxStatusConfirmedFailed).Error; err != nil {
|
||||
return fmt.Errorf("failed to update other transactions as failed by nonce, senderAddress: %s, nonce: %d, txHash: %s, error: %w", senderAddress, nonce, hash, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -79,16 +79,16 @@ contract MockBridgeL1 {
|
||||
* Variables *
|
||||
*************/
|
||||
|
||||
/// @notice Message nonce, used to avoid relay attack.
|
||||
uint256 public messageNonce;
|
||||
|
||||
mapping(uint256 => bytes32) public committedBatches;
|
||||
uint256 public l2BaseFee;
|
||||
|
||||
/***********************************
|
||||
* Functions from L2GasPriceOracle *
|
||||
***********************************/
|
||||
|
||||
function setL2BaseFee(uint256) external {
|
||||
function setL2BaseFee(uint256 _newL2BaseFee) external {
|
||||
l2BaseFee = _newL2BaseFee;
|
||||
}
|
||||
|
||||
/************************************
|
||||
@@ -128,6 +128,10 @@ contract MockBridgeL1 {
|
||||
* Functions from ScrollChain *
|
||||
******************************/
|
||||
|
||||
/// @notice Import layer 2 genesis block
|
||||
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
|
||||
}
|
||||
|
||||
function commitBatch(
|
||||
uint8 /*version*/,
|
||||
bytes calldata _parentBatchHeader,
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -68,6 +70,10 @@ func TestMain(m *testing.M) {
|
||||
}
|
||||
|
||||
func setupEnv(t *testing.T) {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
base.RunImages(t)
|
||||
|
||||
var err error
|
||||
@@ -138,15 +144,10 @@ func TestFunction(t *testing.T) {
|
||||
t.Run("TestProcessStartEnableMetrics", testProcessStartEnableMetrics)
|
||||
|
||||
// l1 rollup and watch rollup events
|
||||
t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
|
||||
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
|
||||
|
||||
// l1 message
|
||||
|
||||
// l2 message
|
||||
// TODO: add a "user relay l2msg Succeed" test
|
||||
|
||||
// l1/l2 gas oracle
|
||||
t.Run("TestImportL1GasPrice", testImportL1GasPrice)
|
||||
t.Run("TestImportL2GasPrice", testImportL2GasPrice)
|
||||
|
||||
}
|
||||
|
||||
@@ -20,6 +20,34 @@ import (
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
func testCommitAndFinalizeGenesisBatch(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, true, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, l2Relayer)
|
||||
|
||||
genesisChunkHash := common.HexToHash("0x00e076380b00a3749816fcc9a2a576b529952ef463222a54544d21b7d434dfe1")
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
dbChunk, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dbChunk, 1)
|
||||
assert.Equal(t, genesisChunkHash.String(), dbChunk[0].Hash)
|
||||
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(dbChunk[0].ProvingStatus))
|
||||
|
||||
genesisBatchHash := common.HexToHash("0x2d214b024f5337d83a5681f88575ab225f345ec2e4e3ce53cf4dc4b0cb5c96b1")
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batch, err := batchOrm.GetBatchByIndex(context.Background(), 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, genesisBatchHash.String(), batch.Hash)
|
||||
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(batch.ProvingStatus))
|
||||
assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus))
|
||||
}
|
||||
|
||||
func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
Reference in New Issue
Block a user