Compare commits


32 Commits

Author  SHA1  Message  Date
georgehao  3d0c82a531  feat: update  2024-02-03 17:31:20 +08:00
georgehao  c23120dcb0  feat: update  2024-02-02 09:26:29 +08:00
georgehao  e52b6351d9  feat: update  2024-02-01 18:00:07 +08:00
georgehao  c3ec9ebdac  feat: update  2024-02-01 17:56:04 +08:00
georgehao  f98a932539  feat: update  2024-02-01 11:46:24 +08:00
georgehao  60d26492ec  feat: update  2024-02-01 11:40:47 +08:00
georgehao  6e1ca34ada  feat: update  2024-01-30 09:28:33 +08:00
georgehao  002e2c09e4  feat: update  2024-01-29 22:46:57 +08:00
georgehao  5bac821b30  feat: update  2024-01-29 22:42:51 +08:00
georgehao  8ed09c6ba0  feat: update  2024-01-29 22:25:02 +08:00
georgehao  90007e2b14  feat: update  2024-01-29 22:23:56 +08:00
georgehao  d1fb1b05ef  feat: update  2024-01-29 21:16:40 +08:00
georgehao  86712e4f81  feat: update  2024-01-29 21:12:46 +08:00
georgehao  e8fb16a600  feat: update  2024-01-29 20:56:25 +08:00
georgehao  9b4808d018  feat: update  2024-01-29 17:22:45 +08:00
georgehao  c93bf62737  feat: update  2024-01-29 17:19:42 +08:00
georgehao  cd2e84973d  feat: update  2024-01-29 17:17:54 +08:00
georgehao  68c1affcf5  feat: update  2024-01-29 17:01:42 +08:00
georgehao  5d901bd833  feat: update  2024-01-29 16:19:20 +08:00
georgehao  3f2379a2bf  feat: update  2024-01-29 16:12:44 +08:00
georgehao  3e44abf883  feat: add rust unit test  2024-01-29 15:28:17 +08:00
georgehao  98c2a6afb4  feat: update  2024-01-24 10:34:45 +08:00
georgehao  7c49bdb3c6  Merge branch 'develop' of github.com:scroll-tech/scroll into feat/prover_oom  2024-01-23 00:43:40 +08:00
georgehao  8135df369b  feat: update  2024-01-22 16:05:45 +08:00
georgehao  e5a5b4a951  feat: update  2024-01-22 15:54:56 +08:00
georgehao  c59e2689bb  feat: update  2024-01-22 15:00:51 +08:00
georgehao  33a6589e69  feat: update  2024-01-22 14:34:03 +08:00
georgehao  178b46b8cb  feat: update  2024-01-19 11:19:08 +08:00
georgehao  b3b0cad8e4  feat: add proof  2024-01-19 11:15:15 +08:00
georgehao  fb857a5107  feat: update  2024-01-18 17:15:41 +08:00
georgehao  cecbef5557  feat: update  2024-01-18 17:03:59 +08:00
georgehao  50a236da9f  feat: add prover oom test  2024-01-18 16:57:04 +08:00
62 changed files with 1600 additions and 3914 deletions

View File

@@ -5,9 +5,6 @@ on:
tags:
- v**
env:
AWS_REGION: us-west-2
jobs:
event_watcher:
runs-on: ubuntu-latest
@@ -32,29 +29,6 @@ jobs:
scrolltech/event-watcher:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
# build and push to aws ecr
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
- name: Build, tag, and push image to Amazon ECR
id: build-image
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
ECR_REPOSITORY: event-watcher
IMAGE_TAG: ${{github.ref_name}}
run: |
# Build a docker container and push it to ECR
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
gas_oracle:
runs-on: ubuntu-latest
steps:
@@ -78,30 +52,6 @@ jobs:
scrolltech/gas-oracle:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
# build and push to aws ecr
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
- name: Build, tag, and push image to Amazon ECR
id: build-image
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
ECR_REPOSITORY: gas-oracle
IMAGE_TAG: ${{github.ref_name}}
run: |
# Build a docker container and push it to ECR
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
rollup_relayer:
runs-on: ubuntu-latest
steps:
@@ -125,124 +75,52 @@ jobs:
scrolltech/rollup-relayer:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
# build and push to aws ecr
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
- name: Build, tag, and push image to Amazon ECR
id: build-image
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
ECR_REPOSITORY: rollup-relayer
IMAGE_TAG: ${{github.ref_name}}
run: |
# Build a docker container and push it to ECR
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-fetcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
push: true
tags: |
scrolltech/bridgehistoryapi-fetcher:${{github.ref_name}}
scrolltech/bridgehistoryapi-fetcher:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
# build and push to aws ecr
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
- name: Build, tag, and push image to Amazon ECR
id: build-image
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
ECR_REPOSITORY: bridgehistoryapi-fetcher
IMAGE_TAG: ${{github.ref_name}}
run: |
# Build a docker container and push it to ECR
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-fetcher docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
push: true
tags: |
scrolltech/bridgehistoryapi-fetcher:${{github.ref_name}}
scrolltech/bridgehistoryapi-fetcher:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-api docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
push: true
tags: |
scrolltech/bridgehistoryapi-api:${{github.ref_name}}
scrolltech/bridgehistoryapi-api:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
# build and push to aws ecr
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
- name: Build, tag, and push image to Amazon ECR
id: build-image
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
ECR_REPOSITORY: bridgehistoryapi-api
IMAGE_TAG: ${{github.ref_name}}
run: |
# Build a docker container and push it to ECR
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-api docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
push: true
tags: |
scrolltech/bridgehistoryapi-api:${{github.ref_name}}
scrolltech/bridgehistoryapi-api:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator-api:
runs-on: ubuntu-latest
steps:
@@ -266,30 +144,6 @@ jobs:
scrolltech/coordinator-api:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
# build and push to aws ecr
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
- name: Build, tag, and push image to Amazon ECR
id: build-image
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
ECR_REPOSITORY: coordinator-api
IMAGE_TAG: ${{github.ref_name}}
run: |
# Build a docker container and push it to ECR
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
coordinator-cron:
runs-on: ubuntu-latest
steps:
@@ -313,26 +167,3 @@ jobs:
scrolltech/coordinator-cron:latest
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
# build and push to aws ecr
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
- name: Build, tag, and push image to Amazon ECR
id: build-image
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
ECR_REPOSITORY: coordinator-cron
IMAGE_TAG: ${{github.ref_name}}
run: |
# Build a docker container and push it to ECR
# docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
docker tag scrolltech/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2022-2024 Scroll
Copyright (c) 2022-2023 Scroll
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -3,8 +3,8 @@
"confirmation": 0,
"endpoint": "https://rpc.ankr.com/eth",
"startHeight": 18306000,
"blockTime": 12,
"fetchLimit": 16,
"blockTime": 10,
"fetchLimit": 30,
"MessengerAddr": "0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367",
"ETHGatewayAddr": "0x7F2b8C31F88B6006c382775eea88297Ec1e3E905",
"WETHGatewayAddr": "0x7AC440cAe8EB6328de4fA621163a792c1EA9D4fE",
@@ -17,14 +17,13 @@
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
"bypassReorgDetection": false
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
},
"L2": {
"confirmation": 0,
"endpoint": "https://rpc.scroll.io",
"blockTime": 3,
"fetchLimit": 64,
"fetchLimit": 100,
"MessengerAddr": "0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC",
"ETHGatewayAddr": "0x6EA73e05AdC79974B931123675ea8F78FfdacDF0",
"WETHGatewayAddr": "0x7003E7B7186f0E6601203b99F7B8DECBfA391cf9",

View File

@@ -6,6 +6,7 @@ require (
github.com/gin-contrib/cors v1.5.0
github.com/gin-gonic/gin v1.9.1
github.com/go-redis/redis/v8 v8.11.5
github.com/google/uuid v1.4.0
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
@@ -39,7 +40,6 @@ require (
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
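This go.mod change promotes github.com/google/uuid from an indirect to a direct dependency; the ORM changes later in this diff call uuid.New() to generate placeholder message hashes for failed gateway router transactions. A minimal sketch of that usage (the context is illustrative, not the repo's exact call site):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// A random v4 UUID stands in for a message hash that does not exist,
	// which satisfies the unique index on message_hash without a real value.
	placeholder := uuid.New().String()
	fmt.Println(placeholder) // e.g. "9b1deb4d-3b7d-4bad-9bdd-2b0d7b3dcb6d"
}
```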

View File

@@ -8,11 +8,11 @@ import (
"scroll-tech/common/database"
)
// FetcherConfig is the configuration of Layer1 or Layer2 fetcher.
type FetcherConfig struct {
// LayerConfig is the configuration of Layer1/Layer2
type LayerConfig struct {
Confirmation uint64 `json:"confirmation"`
Endpoint string `json:"endpoint"`
StartHeight uint64 `json:"startHeight"` // Can only be configured to the contract deployment height; the message proof must be updated from the very beginning.
StartHeight uint64 `json:"startHeight"` // Can only be configured to the contract deployment height; otherwise, in the current implementation, the message proof cannot be successfully updated.
BlockTime int64 `json:"blockTime"`
FetchLimit uint64 `json:"fetchLimit"`
MessengerAddr string `json:"MessengerAddr"`
@@ -28,7 +28,6 @@ type FetcherConfig struct {
ScrollChainAddr string `json:"ScrollChainAddr"`
GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"`
BypassReorgDetection bool `json:"bypassReorgDetection"`
}
// RedisConfig redis config
@@ -44,8 +43,8 @@ type RedisConfig struct {
// Config is the configuration of the bridge history backend
type Config struct {
L1 *FetcherConfig `json:"L1"`
L2 *FetcherConfig `json:"L2"`
L1 *LayerConfig `json:"L1"`
L2 *LayerConfig `json:"L2"`
DB *database.Config `json:"db"`
Redis *RedisConfig `json:"redis"`
}
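For context, a minimal sketch of consuming this configuration; the structs are trimmed copies of the ones in the diff, and the loader and file path are assumptions, not code from the repo:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Trimmed copies of the structs above, kept to a few fields for illustration.
type LayerConfig struct {
	Confirmation uint64 `json:"confirmation"`
	Endpoint     string `json:"endpoint"`
	StartHeight  uint64 `json:"startHeight"`
	BlockTime    int64  `json:"blockTime"`
	FetchLimit   uint64 `json:"fetchLimit"`
}

type Config struct {
	L1 *LayerConfig `json:"L1"`
	L2 *LayerConfig `json:"L2"`
}

func main() {
	buf, err := os.ReadFile("config.json") // hypothetical path
	if err != nil {
		panic(err)
	}
	var cfg Config
	if err := json.Unmarshal(buf, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("L1 fetchLimit=%d, L2 fetchLimit=%d\n", cfg.L1.FetchLimit, cfg.L2.FetchLimit)
}
```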

View File

@@ -20,7 +20,7 @@ import (
// L1MessageFetcher fetches cross message events from L1 and saves them to database.
type L1MessageFetcher struct {
ctx context.Context
cfg *config.FetcherConfig
cfg *config.LayerConfig
client *ethclient.Client
l1SyncHeight uint64
@@ -35,7 +35,7 @@ type L1MessageFetcher struct {
}
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
func NewL1MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
c := &L1MessageFetcher{
ctx: ctx,
cfg: cfg,
@@ -108,6 +108,7 @@ func (c *L1MessageFetcher) Start() {
}
func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
c.l1MessageFetcherRunningTotal.Inc()
startHeight := c.l1SyncHeight + 1
endHeight, rpcErr := utils.GetBlockNumber(c.ctx, c.client, confirmation)
if rpcErr != nil {
@@ -133,7 +134,6 @@ func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
c.l1MessageFetcherReorgTotal.Inc()
log.Warn("L1 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
c.updateL1SyncHeight(resyncHeight, lastBlockHash)
c.l1MessageFetcherRunningTotal.Inc()
return
}
@@ -143,7 +143,6 @@ func (c *L1MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
}
c.updateL1SyncHeight(to, lastBlockHash)
c.l1MessageFetcherRunningTotal.Inc()
}
}
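This hunk consolidates the metric updates: c.l1MessageFetcherRunningTotal.Inc() now runs once at the top of fetchAndSaveEvents instead of at each return site (the same refactor appears in the L2 fetcher below). A minimal sketch of the pattern, assuming a plain Prometheus counter and a stand-in fetcher type:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

type fetcher struct {
	runningTotal prometheus.Counter
}

func (f *fetcher) fetchAndSaveEvents() {
	// A single increment on entry covers every exit path, so early returns
	// (RPC errors, reorg re-entry) no longer need their own Inc() calls.
	f.runningTotal.Inc()
	// ... fetch and persist events ...
}

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "l1_message_fetcher_running_total",
		Help: "Iterations of fetchAndSaveEvents.",
	})
	prometheus.MustRegister(c)
	(&fetcher{runningTotal: c}).fetchAndSaveEvents()
}
```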

View File

@@ -20,7 +20,7 @@ import (
// L2MessageFetcher fetches cross message events from L2 and saves them to database.
type L2MessageFetcher struct {
ctx context.Context
cfg *config.FetcherConfig
cfg *config.LayerConfig
db *gorm.DB
client *ethclient.Client
l2SyncHeight uint64
@@ -35,7 +35,7 @@ type L2MessageFetcher struct {
}
// NewL2MessageFetcher creates a new L2MessageFetcher instance.
func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
func NewL2MessageFetcher(ctx context.Context, cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2MessageFetcher {
c := &L2MessageFetcher{
ctx: ctx,
cfg: cfg,
@@ -110,6 +110,7 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
return
}
log.Info("fetch and save missing L2 events", "start height", startHeight, "end height", endHeight, "confirmation", confirmation)
c.l2MessageFetcherRunningTotal.Inc()
for from := startHeight; from <= endHeight; from += c.cfg.FetchLimit {
to := from + c.cfg.FetchLimit - 1
@@ -127,7 +128,6 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
c.l2MessageFetcherReorgTotal.Inc()
log.Warn("L2 reorg happened, exit and re-enter fetchAndSaveEvents", "re-sync height", resyncHeight)
c.updateL2SyncHeight(resyncHeight, lastBlockHash)
c.l2MessageFetcherRunningTotal.Inc()
return
}
@@ -142,7 +142,6 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
}
c.updateL2SyncHeight(to, lastBlockHash)
c.l2MessageFetcherRunningTotal.Inc()
}
}

View File

@@ -76,30 +76,39 @@ func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (ui
// L1InsertOrUpdate inserts or updates l1 messages
func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult *L1FilterResult) error {
if err := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages); err != nil {
log.Error("failed to insert L1 deposit messages", "err", err)
err := b.db.Transaction(func(tx *gorm.DB) error {
if txErr := b.crossMessageOrm.InsertOrUpdateL1Messages(ctx, l1FetcherResult.DepositMessages, tx); txErr != nil {
log.Error("failed to insert L1 deposit messages", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages, tx); txErr != nil {
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", txErr)
return txErr
}
if txErr := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents, tx); txErr != nil {
log.Error("failed to insert or update batch events", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents, tx); txErr != nil {
log.Error("failed to insert L1 message queue events", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l1FetcherResult.RevertedTxs, tx); txErr != nil {
log.Error("failed to insert L1 failed gateway router transactions", "err", txErr)
return txErr
}
return nil
})
if err != nil {
log.Error("failed to update db of L1 events", "err", err)
return err
}
if err := b.crossMessageOrm.InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx, l1FetcherResult.RelayedMessages); err != nil {
log.Error("failed to update L1 relayed messages of L2 withdrawals", "err", err)
return err
}
if err := b.batchEventOrm.InsertOrUpdateBatchEvents(ctx, l1FetcherResult.BatchEvents); err != nil {
log.Error("failed to insert or update batch events", "err", err)
return err
}
if err := b.crossMessageOrm.UpdateL1MessageQueueEventsInfo(ctx, l1FetcherResult.MessageQueueEvents); err != nil {
log.Error("failed to insert L1 message queue events", "err", err)
return err
}
if err := b.crossMessageOrm.InsertFailedL1GatewayTxs(ctx, l1FetcherResult.RevertedTxs); err != nil {
log.Error("failed to insert failed L1 gateway transactions", "err", err)
return err
}
return nil
}
@@ -177,18 +186,24 @@ func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, heig
// L2InsertOrUpdate inserts or updates L2 messages
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
if err := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages); err != nil {
log.Error("failed to insert L2 withdrawal messages", "err", err)
return err
}
err := b.db.Transaction(func(tx *gorm.DB) error {
if txErr := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages, tx); txErr != nil {
log.Error("failed to insert L2 withdrawal messages", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages, tx); txErr != nil {
log.Error("failed to update L2 relayed messages of L1 deposits", "err", txErr)
return txErr
}
if txErr := b.crossMessageOrm.InsertFailedGatewayRouterTxs(ctx, l2FetcherResult.OtherRevertedTxs, tx); txErr != nil {
log.Error("failed to insert L2 failed gateway router transactions", "err", txErr)
return txErr
}
return nil
})
if err := b.crossMessageOrm.InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx, l2FetcherResult.RelayedMessages); err != nil {
log.Error("failed to update L2 relayed messages of L1 deposits", "err", err)
return err
}
if err := b.crossMessageOrm.InsertFailedL2GatewayTxs(ctx, l2FetcherResult.OtherRevertedTxs); err != nil {
log.Error("failed to insert failed L2 gateway transactions", "err", err)
if err != nil {
log.Error("failed to update db of L2 events", "err", err)
return err
}
return nil
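The rewrite wraps what were five independent writes in a single gorm transaction, so a failure in any step rolls all of them back. A self-contained sketch of the wrapper pattern; SQLite and the Event struct are stand-ins for the real database and ORM types:

```go
package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Event struct {
	ID   uint
	Kind string
}

// insertAtomically persists both slices or neither: returning a non-nil
// error from the closure rolls the transaction back, returning nil commits.
func insertAtomically(db *gorm.DB, deposits, relayed []Event) error {
	return db.Transaction(func(tx *gorm.DB) error {
		if err := tx.Create(&deposits).Error; err != nil {
			return err
		}
		if err := tx.Create(&relayed).Error; err != nil {
			return err
		}
		return nil
	})
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Event{}); err != nil {
		panic(err)
	}
	if err := insertAtomically(db, []Event{{Kind: "deposit"}}, []Event{{Kind: "relayed"}}); err != nil {
		panic(err)
	}
}
```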

View File

@@ -2,7 +2,6 @@ package logic
import (
"context"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -11,27 +10,21 @@ import (
"github.com/scroll-tech/go-ethereum/log"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/utils"
)
// L1EventParser the l1 event parser
type L1EventParser struct {
cfg *config.FetcherConfig
client *ethclient.Client
}
// NewL1EventParser creates l1 event parser
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
return &L1EventParser{
cfg: cfg,
client: client,
}
func NewL1EventParser() *L1EventParser {
return &L1EventParser{}
}
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
func (e *L1EventParser) ParseL1CrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l1DepositMessages []*orm.CrossMessage
var l1RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
@@ -39,7 +32,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
case backendabi.L1DepositETHSig:
event := backendabi.ETHMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ETHGatewayABI, &event, "DepositETH", vlog); err != nil {
log.Error("Failed to unpack DepositETH event", "err", err)
log.Warn("Failed to unpack DepositETH event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
@@ -51,7 +44,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.IL1ERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Error("Failed to unpack DepositERC20 event", "err", err)
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
@@ -64,7 +57,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "DepositERC721", vlog); err != nil {
log.Error("Failed to unpack DepositERC721 event", "err", err)
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
@@ -77,7 +70,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
case backendabi.L1BatchDepositERC721Sig:
event := backendabi.BatchERC721MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC721GatewayABI, &event, "BatchDepositERC721", vlog); err != nil {
log.Error("Failed to unpack BatchDepositERC721 event", "err", err)
log.Warn("Failed to unpack BatchDepositERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
@@ -90,7 +83,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "DepositERC1155", vlog); err != nil {
log.Error("Failed to unpack DepositERC1155 event", "err", err)
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
@@ -104,7 +97,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
case backendabi.L1BatchDepositERC1155Sig:
event := backendabi.BatchERC1155MessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ERC1155GatewayABI, &event, "BatchDepositERC1155", vlog); err != nil {
log.Error("Failed to unpack BatchDepositERC1155 event", "err", err)
log.Warn("Failed to unpack BatchDepositERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
@@ -118,17 +111,12 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
case backendabi.L1SentMessageEventSig:
event := backendabi.L1SentMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
log.Error("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
from, err := getRealFromAddress(ctx, event.Sender, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
if err != nil {
log.Error("Failed to get real 'from' address", "err", err)
log.Warn("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
l1DepositMessages = append(l1DepositMessages, &orm.CrossMessage{
L1BlockNumber: vlog.BlockNumber,
Sender: from,
Sender: event.Sender.String(),
Receiver: event.Target.String(),
TokenType: int(orm.TokenTypeETH),
L1TxHash: vlog.TxHash.String(),
@@ -142,7 +130,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
case backendabi.L1RelayedMessageEventSig:
event := backendabi.L1RelayedMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "RelayedMessage", vlog); err != nil {
log.Error("Failed to unpack RelayedMessage event", "err", err)
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return nil, nil, err
}
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
@@ -155,7 +143,7 @@ func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []t
case backendabi.L1FailedRelayedMessageEventSig:
event := backendabi.L1FailedRelayedMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "FailedRelayedMessage", vlog); err != nil {
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
return nil, nil, err
}
l1RelayedMessages = append(l1RelayedMessages, &orm.CrossMessage{
@@ -178,17 +166,17 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
case backendabi.L1CommitBatchEventSig:
event := backendabi.L1CommitBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "CommitBatch", vlog); err != nil {
log.Error("Failed to unpack CommitBatch event", "err", err)
log.Warn("Failed to unpack CommitBatch event", "err", err)
return nil, err
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
return nil, err
}
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
@@ -202,7 +190,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
case backendabi.L1RevertBatchEventSig:
event := backendabi.L1RevertBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
log.Error("Failed to unpack RevertBatch event", "err", err)
log.Warn("Failed to unpack RevertBatch event", "err", err)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
@@ -214,7 +202,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
case backendabi.L1FinalizeBatchEventSig:
event := backendabi.L1FinalizeBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
log.Error("Failed to unpack FinalizeBatch event", "err", err)
log.Warn("Failed to unpack FinalizeBatch event", "err", err)
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
@@ -241,7 +229,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
case backendabi.L1QueueTransactionEventSig:
event := backendabi.L1QueueTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "QueueTransaction", vlog); err != nil {
log.Error("Failed to unpack QueueTransaction event", "err", err)
log.Warn("Failed to unpack QueueTransaction event", "err", err)
return nil, err
}
messageHash := common.BytesToHash(crypto.Keccak256(event.Data))
@@ -257,7 +245,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
case backendabi.L1DequeueTransactionEventSig:
event := backendabi.L1DequeueTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DequeueTransaction", vlog); err != nil {
log.Error("Failed to unpack DequeueTransaction event", "err", err)
log.Warn("Failed to unpack DequeueTransaction event", "err", err)
return nil, err
}
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
@@ -270,7 +258,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
case backendabi.L1DropTransactionEventSig:
event := backendabi.L1DropTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
log.Error("Failed to unpack DropTransaction event", "err", err)
log.Warn("Failed to unpack DropTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
@@ -282,27 +270,3 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
}
return l1MessageQueueEvents, nil
}
func getRealFromAddress(ctx context.Context, eventSender common.Address, client *ethclient.Client, txHash common.Hash, gatewayRouterAddr string) (string, error) {
from := eventSender.String()
if from == gatewayRouterAddr {
tx, isPending, rpcErr := client.TransactionByHash(ctx, txHash)
if rpcErr != nil || isPending {
log.Error("Failed to get transaction or the transaction is still pending", "rpcErr", rpcErr, "isPending", isPending)
return "", rpcErr
}
// Case 1: deposit/withdraw ETH: EOA -> multisig -> gateway router -> messenger.
if tx.To() != nil && (*tx.To()).String() != gatewayRouterAddr {
return (*tx.To()).String(), nil
}
// Case 2: deposit/withdraw ETH: EOA -> gateway router -> messenger.
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, err := signer.Sender(tx)
if err != nil {
log.Error("Get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", err)
return "", err
}
return sender.String(), nil
}
return from, nil
}
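Every case in this parser goes through utils.UnpackLog, whose implementation is not part of this diff. The sketch below is the conventional shape of such a helper (non-indexed fields decoded from log.Data, indexed fields from the remaining topics); it uses upstream go-ethereum import paths rather than the scroll-tech fork, and is an assumption, not the repo's code:

```go
package utils

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/core/types"
)

// UnpackLog decodes an event log into out: non-indexed fields come from
// log.Data, indexed fields from the topics after the event signature.
func UnpackLog(contractABI abi.ABI, out interface{}, event string, log types.Log) error {
	if len(log.Topics) == 0 || log.Topics[0] != contractABI.Events[event].ID {
		return fmt.Errorf("log is not a %q event", event)
	}
	if len(log.Data) > 0 {
		if err := contractABI.UnpackIntoInterface(out, event, log.Data); err != nil {
			return err
		}
	}
	var indexed abi.Arguments
	for _, arg := range contractABI.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopics(out, indexed, log.Topics[1:])
}
```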

View File

@@ -34,10 +34,9 @@ type L1FilterResult struct {
// L1FetcherLogic the L1 fetcher logic
type L1FetcherLogic struct {
cfg *config.FetcherConfig
cfg *config.LayerConfig
client *ethclient.Client
addressList []common.Address
gatewayList []common.Address
parser *L1EventParser
db *gorm.DB
crossMessageOrm *orm.CrossMessage
@@ -47,7 +46,7 @@ type L1FetcherLogic struct {
}
// NewL1FetcherLogic creates L1 fetcher logic
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
func NewL1FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
@@ -66,40 +65,16 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.MessageQueueAddr),
}
gatewayList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional erc20 gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
}
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
// This workaround is used when the SDK mismatches the upstream.
if cfg.BypassReorgDetection {
log.Warn("bypass reorg detection in L1, setting confirmation as L1ReorgSafeDepth (64)")
cfg.Confirmation = L1ReorgSafeDepth
}
log.Info("NewL2FetcherLogic", "bypassReorgDetection", cfg.BypassReorgDetection, "confirmation", cfg.Confirmation, "addresses", addressList, "gateways", gatewayList)
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList)
f := &L1FetcherLogic{
db: db,
@@ -108,8 +83,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
cfg: cfg,
client: client,
addressList: addressList,
gatewayList: gatewayList,
parser: NewL1EventParser(cfg, client),
parser: NewL1EventParser(),
}
reg := prometheus.DefaultRegisterer
@@ -157,9 +131,15 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
// Gateways: L1 deposit.
txTo := tx.To()
if txTo == nil {
continue
}
toAddress := txTo.String()
// GatewayRouter: L1 deposit.
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
if !isTransactionToGateway(tx, f.gatewayList) {
if toAddress != f.cfg.GatewayRouterAddr && toAddress != f.cfg.MessengerAddr {
continue
}
@@ -253,7 +233,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
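The filter in getRevertedTxs is reduced from a gateway-list membership test to two direct comparisons against the router and messenger addresses. One caveat worth noting: the diff compares the checksummed To() string with the raw config strings, which only matches when the config uses the same casing. The sketch below compares common.Address values instead; that normalization is an editorial suggestion, not part of the diff (upstream go-ethereum imports, addresses taken from the config above):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// isToRouterOrMessenger mirrors the new filter: only transactions sent
// directly to the gateway router or the messenger are inspected.
func isToRouterOrMessenger(tx *types.Transaction, router, messenger common.Address) bool {
	to := tx.To()
	if to == nil {
		return false // contract creation, cannot be a deposit
	}
	return *to == router || *to == messenger
}

func main() {
	router := common.HexToAddress("0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6")
	messenger := common.HexToAddress("0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367")
	tx := types.NewTx(&types.LegacyTx{To: &router})
	fmt.Println(isToRouterOrMessenger(tx, router, messenger)) // true
}
```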

View File

@@ -1,35 +1,26 @@
package logic
import (
"context"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/utils"
)
// L2EventParser the L2 event parser
type L2EventParser struct {
cfg *config.FetcherConfig
client *ethclient.Client
}
// NewL2EventParser creates the L2 event parser
func NewL2EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L2EventParser {
return &L2EventParser{
cfg: cfg,
client: client,
}
func NewL2EventParser() *L2EventParser {
return &L2EventParser{}
}
// ParseL2EventLogs parses L2 watched events
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
func (e *L2EventParser) ParseL2EventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l2WithdrawMessages []*orm.CrossMessage
var l2RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
@@ -38,7 +29,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.ETHMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawETH event", "err", err)
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
@@ -50,7 +41,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC20 event", "err", err)
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
@@ -64,7 +55,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC721 event", "err", err)
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
@@ -78,7 +69,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.BatchERC721MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC721GatewayABI, &event, "BatchWithdrawERC721", vlog)
if err != nil {
log.Error("Failed to unpack BatchWithdrawERC721 event", "err", err)
log.Warn("Failed to unpack BatchWithdrawERC721 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
@@ -92,7 +83,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Error("Failed to unpack WithdrawERC1155 event", "err", err)
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
@@ -107,7 +98,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.BatchERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.IL2ERC1155GatewayABI, &event, "BatchWithdrawERC1155", vlog)
if err != nil {
log.Error("Failed to unpack BatchWithdrawERC1155 event", "err", err)
log.Warn("Failed to unpack BatchWithdrawERC1155 event", "err", err)
return nil, nil, err
}
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
@@ -122,17 +113,12 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.L2SentMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Error("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
from, err := getRealFromAddress(ctx, event.Sender, e.client, vlog.TxHash, e.cfg.GatewayRouterAddr)
if err != nil {
log.Error("Failed to get real 'from' address", "err", err)
log.Warn("Failed to unpack SentMessage event", "err", err)
return nil, nil, err
}
l2WithdrawMessages = append(l2WithdrawMessages, &orm.CrossMessage{
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
Sender: from,
Sender: event.Sender.String(),
Receiver: event.Target.String(),
TokenType: int(orm.TokenTypeETH),
L2TxHash: vlog.TxHash.String(),
@@ -151,7 +137,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Error("Failed to unpack RelayedMessage event", "err", err)
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return nil, nil, err
}
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
@@ -165,7 +151,7 @@ func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log,
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.IL2ScrollMessengerABI, &event, "FailedRelayedMessage", vlog)
if err != nil {
log.Error("Failed to unpack FailedRelayedMessage event", "err", err)
log.Warn("Failed to unpack FailedRelayedMessage event", "err", err)
return nil, nil, err
}
l2RelayedMessages = append(l2RelayedMessages, &orm.CrossMessage{
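The SentMessage branch above derives MessageHash through utils.ComputeMessageHash(sender, target, value, nonce, message), whose body is not shown in this diff. A plausible sketch, assuming the hash is keccak256 over relayMessage calldata; the ABI fragment and encoding here are assumptions, and the repo's helper may differ:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

const relayMessageABI = `[{"type":"function","name":"relayMessage","inputs":[
  {"name":"from","type":"address"},{"name":"to","type":"address"},
  {"name":"value","type":"uint256"},{"name":"nonce","type":"uint256"},
  {"name":"message","type":"bytes"}]}]`

// computeMessageHash hashes the calldata of relayMessage(from, to, value,
// nonce, message), which both layers can derive for the same cross message.
func computeMessageHash(sender, target common.Address, value, nonce *big.Int, message []byte) (common.Hash, error) {
	parsed, err := abi.JSON(strings.NewReader(relayMessageABI))
	if err != nil {
		return common.Hash{}, err
	}
	data, err := parsed.Pack("relayMessage", sender, target, value, nonce, message)
	if err != nil {
		return common.Hash{}, err
	}
	return common.BytesToHash(crypto.Keccak256(data)), nil
}

func main() {
	h, err := computeMessageHash(common.HexToAddress("0x01"), common.HexToAddress("0x02"), big.NewInt(0), big.NewInt(1), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(h.Hex())
}
```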

View File

@@ -33,10 +33,9 @@ type L2FilterResult struct {
// L2FetcherLogic the L2 fetcher logic
type L2FetcherLogic struct {
cfg *config.FetcherConfig
cfg *config.LayerConfig
client *ethclient.Client
addressList []common.Address
gatewayList []common.Address
parser *L2EventParser
db *gorm.DB
crossMessageOrm *orm.CrossMessage
@@ -46,7 +45,7 @@ type L2FetcherLogic struct {
}
// NewL2FetcherLogic create L2 fetcher logic
func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
func NewL2FetcherLogic(cfg *config.LayerConfig, db *gorm.DB, client *ethclient.Client) *L2FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
@@ -61,38 +60,16 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.MessengerAddr),
}
gatewayList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
common.HexToAddress(cfg.ERC1155GatewayAddr),
common.HexToAddress(cfg.MessengerAddr),
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional erc20 gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
}
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if cfg.BypassReorgDetection {
log.Crit("Never bypass reorg detection in L2")
}
log.Info("NewL2FetcherLogic", "bypassReorgDetection", cfg.BypassReorgDetection, "confirmation", cfg.Confirmation, "addresses", addressList, "gateways", gatewayList)
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList)
f := &L2FetcherLogic{
db: db,
@@ -101,8 +78,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
cfg: cfg,
client: client,
addressList: addressList,
gatewayList: gatewayList,
parser: NewL2EventParser(cfg, client),
parser: NewL2EventParser(),
}
reg := prometheus.DefaultRegisterer
@@ -151,7 +127,42 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
if tx.IsL1MessageTx() {
txTo := tx.To()
if txTo == nil {
continue
}
toAddress := txTo.String()
// GatewayRouter: L2 withdrawal.
if toAddress == f.cfg.GatewayRouterAddr {
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, nil, receiptErr
}
// Check if the transaction failed
if receipt.Status == types.ReceiptStatusFailed {
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, signerErr := signer.Sender(tx)
if signerErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
return nil, nil, nil, signerErr
}
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
L2TxHash: tx.Hash().String(),
MessageType: int(orm.MessageTypeL2SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L2BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
})
}
}
if tx.Type() == types.L1MessageTxType {
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
@@ -168,38 +179,6 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
MessageType: int(orm.MessageTypeL1SentMessage),
})
}
continue
}
// Gateways: L2 withdrawal.
if !isTransactionToGateway(tx, f.gatewayList) {
continue
}
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, nil, nil, receiptErr
}
// Check if the transaction failed
if receipt.Status == types.ReceiptStatusFailed {
signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
sender, signerErr := signer.Sender(tx)
if signerErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", signerErr)
return nil, nil, nil, signerErr
}
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
L2TxHash: tx.Hash().String(),
MessageType: int(orm.MessageTypeL2SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L2BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
})
}
}
}
@@ -256,7 +235,7 @@ func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
@@ -300,15 +279,3 @@ func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
}
}
}
func isTransactionToGateway(tx *types.Transaction, gatewayList []common.Address) bool {
if tx.To() == nil {
return false
}
for _, gateway := range gatewayList {
if *tx.To() == gateway {
return true
}
}
return false
}
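The restructured getRevertedTxs keeps the same core step for user transactions: fetch the receipt, and only when the status is failed recover the sender from the signature. The step in isolation, as a hedged sketch (upstream go-ethereum imports; client wiring and the surrounding loop are omitted):

```go
package fetcher

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

// revertedSender returns the sender address and true when tx reverted;
// successful transactions are skipped without recovering the signature.
func revertedSender(ctx context.Context, client *ethclient.Client, tx *types.Transaction) (string, bool, error) {
	receipt, err := client.TransactionReceipt(ctx, tx.Hash())
	if err != nil {
		return "", false, err
	}
	if receipt.Status != types.ReceiptStatusFailed {
		return "", false, nil // tx succeeded, nothing to record
	}
	signer := types.LatestSignerForChainID(new(big.Int).SetUint64(tx.ChainId().Uint64()))
	sender, err := signer.Sender(tx)
	if err != nil {
		return "", false, err
	}
	return sender.String(), true, nil
}
```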

View File

@@ -6,7 +6,6 @@ import (
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// BatchStatusType represents the type of batch status.
@@ -90,21 +89,19 @@ func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, block
}
// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent) error {
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent, dbTX ...*gorm.DB) error {
for _, l1BatchEvent := range l1BatchEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&BatchEvent{})
updateFields := make(map[string]interface{})
switch BatchStatusType(l1BatchEvent.BatchStatus) {
case BatchStatusTypeCommitted:
// Use the clause to either insert or ignore on conflict
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "batch_hash"}},
DoNothing: true,
})
if err := db.Create(l1BatchEvent).Error; err != nil {
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
return fmt.Errorf("failed to insert batch event, error: %w", err)
}
case BatchStatusTypeFinalized:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
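Two patterns recur across these ORM changes: the optional variadic dbTX parameter, which lets a method join a caller's transaction, and the OnConflict/DoNothing clause for insert-or-ignore. A self-contained sketch combining both, with SQLite and a trimmed struct standing in for the real schema:

```go
package main

import (
	"context"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

type BatchEvent struct {
	ID        uint
	BatchHash string `gorm:"uniqueIndex"`
	Status    string
}

type BatchEventStore struct{ db *gorm.DB }

// InsertOrIgnore inserts ev, silently skipping rows whose batch_hash already
// exists. If the caller passes a transaction handle, the write joins it.
func (s *BatchEventStore) InsertOrIgnore(ctx context.Context, ev *BatchEvent, dbTX ...*gorm.DB) error {
	db := s.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0] // run inside the caller's transaction
	}
	return db.WithContext(ctx).
		Clauses(clause.OnConflict{
			Columns:   []clause.Column{{Name: "batch_hash"}},
			DoNothing: true,
		}).
		Create(ev).Error
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&BatchEvent{}); err != nil {
		panic(err)
	}
	store := &BatchEventStore{db: db}
	ctx := context.Background()
	_ = store.InsertOrIgnore(ctx, &BatchEvent{BatchHash: "0xabc", Status: "committed"})
	// Standalone call and transactional call both work:
	_ = db.Transaction(func(tx *gorm.DB) error {
		return store.InsertOrIgnore(ctx, &BatchEvent{BatchHash: "0xabc"}, tx) // duplicate is ignored
	})
}
```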

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/google/uuid"
"github.com/scroll-tech/go-ethereum/common"
"gorm.io/gorm"
"gorm.io/gorm/clause"
@@ -46,9 +47,9 @@ const (
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // Does not track the message hash, so it will not be processed again.
TxStatusTypeRelayed // Terminal status.
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeFailedRelayed
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
// FailedRelayedMessage event: encoded tx failed, cannot retry. e.g., https://sepolia.scrollscan.com/tx/0xfc7d3ea5ec8dc9b664a5a886c3b33d21e665355057601033481a439498efb79a
TxStatusTypeFailedRelayed // Terminal status.
// In some cases, user can retry with a larger gas limit. e.g., https://sepolia.scrollscan.com/tx/0x7323a7ba29492cb47d92206411be99b27896f2823cee0633a596b646b73f1b5b
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
@@ -253,27 +254,38 @@ func (c *CrossMessage) GetTxsByAddress(ctx context.Context, sender string) ([]*C
}
// UpdateL1MessageQueueEventsInfo updates the information about L1 message queue events in the database.
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent) error {
func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1MessageQueueEvents []*MessageQueueEvent, dbTX ...*gorm.DB) error {
// update tx statuses.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// do not overwrite terminal statuses.
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeFailedRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
txStatusUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case MessageQueueEventTypeQueueTransaction:
continue
// only replayMessages or enforced txs (whose message hashes would not be found); sentMessages have been filtered out.
// replayMessage case:
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
//
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
// because in replayMessage, queue index != message nonce.
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txStatusUpdateFields["tx_status"] = TxStatusTypeSent // reset status to "sent".
case MessageQueueEventTypeDequeueTransaction:
// do not overwrite terminal statuses.
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
case MessageQueueEventTypeDropTransaction:
// do not overwrite terminal statuses.
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
@@ -286,22 +298,15 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
// update tx hashes of replay and refund.
for _, l1MessageQueueEvent := range l1MessageQueueEvents {
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case MessageQueueEventTypeDequeueTransaction:
continue
case MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs (whose message hashes would not be found); sentMessages have been filtered out.
// replayMessage case:
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
// Transaction reverted in L2: https://sepolia.scrollscan.com/tx/0xde6ef307a7da255888aad7a4c40a6b8c886e46a8a05883070bbf18b736cbfb8c
// replayMessage: https://sepolia.etherscan.io/tx/0xa5392891232bb32d98fcdbaca0d91b4d22ef2755380d07d982eebd47b147ce28
//
// Note: update l1_tx_hash if the user calls replayMessage, cannot use queue index here,
// because in replayMessage, queue index != message nonce.
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
// only replayMessages or enforced txs (whose message hashes would not be found); sentMessages have been filtered out.
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
case MessageQueueEventTypeDropTransaction:
@@ -309,8 +314,11 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
}
if err := db.Updates(txHashUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
// Skip the update when there are no fields to set, to avoid an empty update operation (e.g., for skipped messages).
if len(txHashUpdateFields) > 0 {
if err := db.Updates(txHashUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx hashes of replay and refund in L1 message queue events info, update fields: %v, error: %w", txHashUpdateFields, err)
}
}
}
return nil
@@ -354,11 +362,14 @@ func (c *CrossMessage) UpdateBatchIndexRollupStatusMerkleProofOfL2Messages(ctx c
}
// InsertOrUpdateL1Messages inserts or updates a list of L1 cross messages into the database.
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage) error {
func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
@@ -373,14 +384,18 @@ func (c *CrossMessage) InsertOrUpdateL1Messages(ctx context.Context, messages []
}
// InsertOrUpdateL2Messages inserts or updates a list of L2 cross messages into the database.
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage) error {
func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
// 'tx_status' column is not explicitly assigned during the update to prevent a later status from being overwritten back to "sent".
// The merkle_proof is updated separately in batch status updates and hence is not included here.
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoUpdates: clause.AssignmentColumns([]string{"sender", "receiver", "token_type", "l2_block_number", "l2_tx_hash", "l1_token_address", "l2_token_address", "token_ids", "token_amounts", "message_type", "block_timestamp", "message_from", "message_to", "message_value", "message_data", "message_nonce"}),
@@ -391,60 +406,31 @@ func (c *CrossMessage) InsertOrUpdateL2Messages(ctx context.Context, messages []
return nil
}
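
The upsert above is keyed on `message_hash` and deliberately leaves `tx_status` (and `merkle_proof`) out of the refreshed columns, so re-indexing an old event cannot roll a message back to an earlier status. A minimal sketch of the same GORM pattern, with a reduced column list:

```go
package sketch

import (
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

type msg struct {
	MessageHash string `gorm:"uniqueIndex"`
	Sender      string
	TxStatus    int
}

// upsertMsgs inserts rows and, on a message_hash collision, refreshes
// only the listed columns; tx_status is intentionally omitted so a
// replayed "sent" event cannot overwrite a later status.
func upsertMsgs(db *gorm.DB, msgs []msg) error {
	return db.Clauses(clause.OnConflict{
		Columns:   []clause.Column{{Name: "message_hash"}},
		DoUpdates: clause.AssignmentColumns([]string{"sender"}),
	}).Create(&msgs).Error
}
```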
// InsertFailedL2GatewayTxs inserts a list of transactions that failed to interact with the L2 gateways into the database.
// To resolve unique index conflicts, the L2 tx hash is used as the MessageHash.
// The OnConflict clause prevents inserting the same failed transactions multiple times.
func (c *CrossMessage) InsertFailedL2GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
// InsertFailedGatewayRouterTxs inserts a list of transactions that failed to interact with the gateway router into the database.
// These failed transactions are only fetched once, so they are inserted without checking for duplicates.
// To resolve unique index conflicts, a random UUID is generated and used as the MessageHash.
func (c *CrossMessage) InsertFailedGatewayRouterTxs(ctx context.Context, messages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(messages) == 0 {
return nil
}
for _, message := range messages {
message.MessageHash = message.L2TxHash
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoNothing: true,
})
if err := db.Create(&messages).Error; err != nil {
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
}
return nil
}
// InsertFailedL1GatewayTxs inserts a list of transactions that failed to interact with the L1 gateways into the database.
// To resolve unique index conflicts, the L1 tx hash is used as the MessageHash.
// The OnConflict clause prevents inserting the same failed transactions multiple times.
func (c *CrossMessage) InsertFailedL1GatewayTxs(ctx context.Context, messages []*CrossMessage) error {
if len(messages) == 0 {
return nil
}
for _, message := range messages {
message.MessageHash = message.L1TxHash
message.MessageHash = uuid.New().String()
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "message_hash"}},
DoNothing: true,
})
if err := db.Create(&messages).Error; err != nil {
if err := db.Create(messages).Error; err != nil {
return fmt.Errorf("failed to insert failed gateway router txs, error: %w", err)
}
return nil
}
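
Since failed gateway-router transactions never produce a real cross-chain message hash, the rewritten function fills `MessageHash` with a random UUID to satisfy the unique index, and keeps the `DoNothing` conflict clause as a safety net. A minimal sketch, assuming the same `github.com/google/uuid` dependency the diff already uses:

```go
package sketch

import (
	"github.com/google/uuid"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

type failedTx struct {
	MessageHash string `gorm:"uniqueIndex"`
	TxHash      string
}

// insertFailedTxs gives every failed tx a synthetic MessageHash so the
// unique index never collides with a real message; DoNothing keeps an
// accidentally replayed batch from erroring out.
func insertFailedTxs(db *gorm.DB, txs []failedTx) error {
	for i := range txs {
		txs[i].MessageHash = uuid.New().String()
	}
	return db.Clauses(clause.OnConflict{
		Columns:   []clause.Column{{Name: "message_hash"}},
		DoNothing: true,
	}).Create(&txs).Error
}
```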
// InsertOrUpdateL2RelayedMessagesOfL1Deposits inserts or updates the database with a list of L2 relayed messages related to L1 deposits.
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage) error {
func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.Context, l2RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(l2RelayedMessages) == 0 {
return nil
}
@@ -473,7 +459,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
for _, msg := range mergedL2RelayedMessages {
uniqueL2RelayedMessages = append(uniqueL2RelayedMessages, msg)
}
// Do not update tx status of successfully relayed messages,
// Do not update the tx status of successfully relayed or failed-relayed messages,
// because if a message is handled, the later relayed message tx would be reverted.
// ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L2/L2ScrollMessenger.sol#L102
// e.g.,
@@ -490,6 +476,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
clause.And(
// do not overwrite terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
),
},
@@ -502,7 +489,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
}
// InsertOrUpdateL1RelayedMessagesOfL2Withdrawals inserts or updates the database with a list of L1 relayed messages related to L2 withdrawals.
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage) error {
func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx context.Context, l1RelayedMessages []*CrossMessage, dbTX ...*gorm.DB) error {
if len(l1RelayedMessages) == 0 {
return nil
}
@@ -532,6 +519,9 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
uniqueL1RelayedMessages = append(uniqueL1RelayedMessages, msg)
}
db := c.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Clauses(clause.OnConflict{
@@ -542,6 +532,7 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
clause.And(
// do not overwrite terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeFailedRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
),
},
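
Both relayed-message upserts now treat `TxStatusTypeFailedRelayed` as terminal as well. The guard lives inside the `OnConflict` clause, which compiles to `ON CONFLICT ... DO UPDATE ... WHERE tx_status NOT IN (...)`. A minimal sketch of that conditional upsert, assuming simplified status constants:

```go
package sketch

import (
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

const (
	statusRelayed       = 2
	statusFailedRelayed = 3
	statusDropped       = 4
)

type relayedMsg struct {
	MessageHash string `gorm:"uniqueIndex"`
	TxStatus    int
}

// upsertRelayed updates a conflicting row only while it is not in a
// terminal state; terminal rows fail the DO UPDATE ... WHERE filter
// and are left untouched.
func upsertRelayed(db *gorm.DB, msgs []relayedMsg) error {
	return db.Clauses(clause.OnConflict{
		Columns:   []clause.Column{{Name: "message_hash"}},
		DoUpdates: clause.AssignmentColumns([]string{"tx_status"}),
		Where: clause.Where{Exprs: []clause.Expression{clause.And(
			clause.Neq{Column: "relayed_msgs.tx_status", Value: statusRelayed},
			clause.Neq{Column: "relayed_msgs.tx_status", Value: statusFailedRelayed},
			clause.Neq{Column: "relayed_msgs.tx_status", Value: statusDropped},
		)}},
	}).Create(&msgs).Error
}
```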


@@ -15,7 +15,6 @@ CREATE TABLE batch_event_v2
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS unique_idx_be_batch_hash ON batch_event_v2 (batch_hash);
CREATE INDEX IF NOT EXISTS idx_be_l1_block_number ON batch_event_v2 (l1_block_number);
CREATE INDEX IF NOT EXISTS idx_be_batch_index ON batch_event_v2 (batch_index);
CREATE INDEX IF NOT EXISTS idx_be_batch_index_batch_hash ON batch_event_v2 (batch_index, batch_hash);


@@ -1374,6 +1374,12 @@ dependencies = [
"syn 1.0.109",
]
[[package]]
name = "glob"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "gloo-timers"
version = "0.2.6"
@@ -4172,6 +4178,7 @@ version = "0.1.0"
dependencies = [
"base64 0.13.1",
"env_logger 0.9.3",
"glob",
"halo2_proofs",
"libc",
"log",


@@ -23,6 +23,7 @@ halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch =
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.8", default-features = false, features = ["parallel_syn", "scroll", "shanghai", "strict-ccc"] }
glob = "0.3"
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"


@@ -66,7 +66,7 @@ pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *mut c_char {
let check_result: Result<bool, String> = panic_catch(|| {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
@@ -94,7 +94,7 @@ pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *con
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
serde_json::to_vec(&r).map_or(std::ptr::null_mut(), vec_to_c_char)
}
/// # Safety
@@ -102,7 +102,7 @@ pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *con
pub unsafe extern "C" fn gen_batch_proof(
chunk_hashes: *const c_char,
chunk_proofs: *const c_char,
) -> *const c_char {
) -> *mut c_char {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);
@@ -143,7 +143,7 @@ pub unsafe extern "C" fn gen_batch_proof(
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
serde_json::to_vec(&r).map_or(std::ptr::null_mut(), vec_to_c_char)
}
/// # Safety


@@ -5,14 +5,15 @@ use crate::{
OUTPUT_DIR,
},
};
use glob::glob;
use libc::c_char;
use prover::{
consts::CHUNK_VK_FILENAME,
utils::init_env_and_log,
utils::{get_block_trace_from_file, init_env_and_log},
zkevm::{Prover, Verifier},
BlockTrace, ChunkProof,
};
use std::{cell::OnceCell, env, ptr::null};
use std::{cell::OnceCell, env, ffi::CString, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
@@ -66,9 +67,14 @@ pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
pub unsafe extern "C" fn gen_chunk_proof(block_traces1: *const c_char) {
let chunk_trace = load_batch_traces().1;
let json_str = serde_json::to_string(&chunk_trace).expect("Serialization failed");
let c_string = CString::new(json_str).expect("CString conversion failed");
let c_str_ptr = c_string.as_ptr();
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let block_traces = c_char_to_vec(block_traces);
let block_traces = c_char_to_vec(c_str_ptr);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
@@ -82,7 +88,7 @@ pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match proof_result {
let _ = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
@@ -93,7 +99,7 @@ pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
// serde_json::to_vec(&r).map_or(std::ptr::null_mut(), vec_to_c_char)
}
/// # Safety
@@ -105,3 +111,31 @@ pub unsafe extern "C" fn verify_chunk_proof(proof: *const c_char) -> c_char {
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
verified.unwrap_or(false) as c_char
}
fn load_batch_traces() -> (Vec<String>, Vec<BlockTrace>) {
let file_names: Vec<String> = glob(&"/assets/traces/1_transfer.json".to_string())
.unwrap()
.map(|p| p.unwrap().to_str().unwrap().to_string())
.collect();
log::info!("test batch with {:?}", file_names);
let mut names_and_traces = file_names
.into_iter()
.map(|trace_path| {
let trace: BlockTrace = get_block_trace_from_file(trace_path.clone());
(
trace_path,
trace.clone(),
trace.header.number.unwrap().as_u64(),
)
})
.collect::<Vec<_>>();
names_and_traces.sort_by(|a, b| a.2.cmp(&b.2));
log::info!(
"sorted: {:?}",
names_and_traces
.iter()
.map(|(f, _, _)| f.clone())
.collect::<Vec<String>>()
);
names_and_traces.into_iter().map(|(f, t, _)| (f, t)).unzip()
}
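
`load_batch_traces` hard-codes a single fixture (`/assets/traces/1_transfer.json`), globs it, sorts the traces by block number, and unzips file names from traces, which keeps the OOM test deterministic. A rough Go analogue of the same load-and-sort step, assuming a simplified trace schema whose `header.number` is a plain integer (the real BlockTrace encodes it differently):

```go
package sketch

import (
	"encoding/json"
	"os"
	"path/filepath"
	"sort"
)

// trace is a tiny stand-in for the prover's BlockTrace type.
type trace struct {
	Header struct {
		Number uint64 `json:"number"`
	} `json:"header"`
}

// loadTraces globs the fixture files, decodes each one, and sorts by
// block number so chunks are always built in a deterministic order.
func loadTraces(pattern string) ([]string, []trace, error) {
	paths, err := filepath.Glob(pattern)
	if err != nil {
		return nil, nil, err
	}
	type entry struct {
		path string
		t    trace
	}
	entries := make([]entry, 0, len(paths))
	for _, p := range paths {
		raw, err := os.ReadFile(p)
		if err != nil {
			return nil, nil, err
		}
		var t trace
		if err := json.Unmarshal(raw, &t); err != nil {
			return nil, nil, err
		}
		entries = append(entries, entry{p, t})
	}
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].t.Header.Number < entries[j].t.Header.Number
	})
	names := make([]string, len(entries))
	traces := make([]trace, len(entries))
	for i, e := range entries {
		names[i], traces[i] = e.path, e.t
	}
	return names, traces, nil
}
```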


@@ -1,6 +1,6 @@
#![feature(once_cell)]
mod batch;
mod chunk;
pub mod chunk;
mod types;
mod utils;


@@ -38,7 +38,7 @@ pub(crate) fn string_to_c_char(string: String) -> *const c_char {
CString::new(string).unwrap().into_raw()
}
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *mut c_char {
CString::new(bytes).unwrap().into_raw()
}


@@ -0,0 +1,66 @@
use glob::glob;
use prover::{utils::get_block_trace_from_file, BlockTrace};
use std::ffi::{CStr, CString};
use zkp::chunk;
#[test]
fn chunk_test() {
println!("start chunk_test.");
unsafe {
let params = CString::new("/assets/test_params").expect("test_params conversion failed");
let assets = CString::new("/assets/test_assets").expect("test_assets conversion failed");
chunk::init_chunk_prover(params.as_ptr(), assets.as_ptr());
let chunk_trace = load_batch_traces().1;
let json_str = serde_json::to_string(&chunk_trace).expect("Serialization failed");
let c_string = CString::new(json_str).expect("CString conversion failed");
let c_str_ptr = c_string.as_ptr();
let ptr_cstr = CStr::from_ptr(c_str_ptr)
.to_str()
.expect("Failed to convert C string to Rust string");
println!("c_str_ptr len: {:?}", ptr_cstr.len());
let mut count = 1;
loop {
count += 1;
println!("count {:?}", count);
let _ = chunk::gen_chunk_proof(c_str_ptr);
// let ret_cstr = CStr::from_ptr(ret)
// .to_str()
// .expect("Failed to convert C string to Rust string");
// println!("ret: {:?}", ret_cstr)
}
}
}
fn load_batch_traces() -> (Vec<String>, Vec<BlockTrace>) {
let file_names: Vec<String> = glob(&"/assets/traces/1_transfer.json".to_string())
.unwrap()
.map(|p| p.unwrap().to_str().unwrap().to_string())
.collect();
log::info!("test batch with {:?}", file_names);
let mut names_and_traces = file_names
.into_iter()
.map(|trace_path| {
let trace: BlockTrace = get_block_trace_from_file(trace_path.clone());
(
trace_path,
trace.clone(),
trace.header.number.unwrap().as_u64(),
)
})
.collect::<Vec<_>>();
names_and_traces.sort_by(|a, b| a.2.cmp(&b.2));
log::info!(
"sorted: {:?}",
names_and_traces
.iter()
.map(|(f, _, _)| f.clone())
.collect::<Vec<String>>()
);
names_and_traces.into_iter().map(|(f, t, _)| (f, t)).unzip()
}


@@ -8,7 +8,7 @@ char verify_batch_proof(char* proof);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
void gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
char* block_traces_to_chunk_info(char* block_traces);
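
With this header change, `gen_chunk_proof` becomes fire-and-forget: it returns `void`, so a cgo caller can no longer read a proof back and only drives the prover, which is exactly what the OOM test needs. A minimal cgo sketch against these prototypes; linking the actual libzkp archive is assumed and not shown:

```go
package main

/*
#include <stdlib.h>
// Prototypes copied from the header in this diff.
void init_chunk_prover(char* params_dir, char* assets_dir);
void gen_chunk_proof(char* block_traces);
*/
import "C"

import "unsafe"

func main() {
	params := C.CString("/assets/test_params")
	assets := C.CString("/assets/test_assets")
	defer C.free(unsafe.Pointer(params))
	defer C.free(unsafe.Pointer(assets))
	C.init_chunk_prover(params, assets)

	// JSON-encoded block traces in the real caller; empty list here.
	traces := C.CString("[]")
	defer C.free(unsafe.Pointer(traces))
	C.gen_chunk_proof(traces) // no return value after this change
}
```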


@@ -21,8 +21,8 @@ const (
// GasOracleImported represents the gas oracle status is imported
GasOracleImported
// GasOracleImportedFailed represents the gas oracle status is imported failed
GasOracleImportedFailed
// GasOracleFailed represents the gas oracle status is failed
GasOracleFailed
)
func (s GasOracleStatus) String() string {
@@ -35,10 +35,10 @@ func (s GasOracleStatus) String() string {
return "GasOracleImporting"
case GasOracleImported:
return "GasOracleImported"
case GasOracleImportedFailed:
return "GasOracleImportedFailed"
case GasOracleFailed:
return "GasOracleFailed"
default:
return fmt.Sprintf("Undefined GasOracleStatus (%d)", int32(s))
return fmt.Sprintf("Undefined (%d)", int32(s))
}
}
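
These hunks converge every status type on one short `Undefined (%d)` fallback instead of per-type default strings, alongside the `GasOracleImportedFailed` to `GasOracleFailed` rename. A minimal sketch of the iota-enum-plus-String pattern they all follow (names abbreviated):

```go
package sketch

import "fmt"

type gasOracleStatus int

const (
	gasOracleUndefined gasOracleStatus = iota
	gasOraclePending
	gasOracleImporting
	gasOracleImported
	gasOracleFailed // renamed from GasOracleImportedFailed in this diff
)

func (s gasOracleStatus) String() string {
	switch s {
	case gasOraclePending:
		return "GasOraclePending"
	case gasOracleImporting:
		return "GasOracleImporting"
	case gasOracleImported:
		return "GasOracleImported"
	case gasOracleFailed:
		return "GasOracleFailed"
	default:
		return fmt.Sprintf("Undefined (%d)", int32(s))
	}
}
```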
@@ -159,7 +159,7 @@ func (ps ProvingStatus) String() string {
case ProvingTaskFailed:
return "failed"
default:
return fmt.Sprintf("Undefined ProvingStatus (%d)", int32(ps))
return fmt.Sprintf("Undefined (%d)", int32(ps))
}
}
@@ -184,7 +184,7 @@ func (s ChunkProofsStatus) String() string {
case ChunkProofsStatusReady:
return "ChunkProofsStatusReady"
default:
return fmt.Sprintf("Undefined ChunkProofsStatus (%d)", int32(s))
return fmt.Sprintf("Undefined (%d)", int32(s))
}
}
@@ -227,69 +227,6 @@ func (s RollupStatus) String() string {
case RollupFinalizeFailed:
return "RollupFinalizeFailed"
default:
return fmt.Sprintf("Undefined RollupStatus (%d)", int32(s))
}
}
// SenderType defines the various types of senders sending the transactions.
type SenderType int
const (
// SenderTypeUnknown indicates an unknown sender type.
SenderTypeUnknown SenderType = iota
// SenderTypeCommitBatch indicates the sender is responsible for committing batches.
SenderTypeCommitBatch
// SenderTypeFinalizeBatch indicates the sender is responsible for finalizing batches.
SenderTypeFinalizeBatch
// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
SenderTypeL1GasOracle
// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
SenderTypeL2GasOracle
)
// String returns a string representation of the SenderType.
func (t SenderType) String() string {
switch t {
case SenderTypeCommitBatch:
return "SenderTypeCommitBatch"
case SenderTypeFinalizeBatch:
return "SenderTypeFinalizeBatch"
case SenderTypeL1GasOracle:
return "SenderTypeL1GasOracle"
case SenderTypeL2GasOracle:
return "SenderTypeL2GasOracle"
default:
return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
}
}
// TxStatus represents the current status of a transaction in the transaction lifecycle.
type TxStatus int
const (
// TxStatusUnknown represents an undefined status of the transaction.
TxStatusUnknown TxStatus = iota
// TxStatusPending indicates that the transaction is yet to be processed.
TxStatusPending
// TxStatusReplaced indicates that the transaction has been replaced by another one, typically due to a higher gas price.
TxStatusReplaced
// TxStatusConfirmed indicates that the transaction has been successfully processed and confirmed.
TxStatusConfirmed
// TxStatusConfirmedFailed indicates that the transaction has failed during processing.
TxStatusConfirmedFailed
)
func (s TxStatus) String() string {
switch s {
case TxStatusPending:
return "TxStatusPending"
case TxStatusReplaced:
return "TxStatusReplaced"
case TxStatusConfirmed:
return "TxStatusConfirmed"
case TxStatusConfirmedFailed:
return "TxStatusConfirmedFailed"
default:
return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
return fmt.Sprintf("Undefined (%d)", int32(s))
}
}


@@ -75,7 +75,7 @@ func TestProvingStatus(t *testing.T) {
{
"Undefined",
ProvingStatus(999), // Invalid value.
"Undefined ProvingStatus (999)",
"Undefined (999)",
},
}
@@ -85,243 +85,3 @@ func TestProvingStatus(t *testing.T) {
})
}
}
func TestRollupStatus(t *testing.T) {
tests := []struct {
name string
s RollupStatus
want string
}{
{
"RollupUndefined",
RollupUndefined,
"Undefined RollupStatus (0)",
},
{
"RollupPending",
RollupPending,
"RollupPending",
},
{
"RollupCommitting",
RollupCommitting,
"RollupCommitting",
},
{
"RollupCommitted",
RollupCommitted,
"RollupCommitted",
},
{
"RollupFinalizing",
RollupFinalizing,
"RollupFinalizing",
},
{
"RollupFinalized",
RollupFinalized,
"RollupFinalized",
},
{
"RollupCommitFailed",
RollupCommitFailed,
"RollupCommitFailed",
},
{
"RollupFinalizeFailed",
RollupFinalizeFailed,
"RollupFinalizeFailed",
},
{
"Invalid Value",
RollupStatus(999),
"Undefined RollupStatus (999)",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, tt.s.String())
})
}
}
func TestSenderType(t *testing.T) {
tests := []struct {
name string
t SenderType
want string
}{
{
"SenderTypeUnknown",
SenderTypeUnknown,
"Unknown SenderType (0)",
},
{
"SenderTypeCommitBatch",
SenderTypeCommitBatch,
"SenderTypeCommitBatch",
},
{
"SenderTypeFinalizeBatch",
SenderTypeFinalizeBatch,
"SenderTypeFinalizeBatch",
},
{
"SenderTypeL1GasOracle",
SenderTypeL1GasOracle,
"SenderTypeL1GasOracle",
},
{
"SenderTypeL2GasOracle",
SenderTypeL2GasOracle,
"SenderTypeL2GasOracle",
},
{
"Invalid Value",
SenderType(999),
"Unknown SenderType (999)",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, tt.t.String())
})
}
}
func TestTxStatus(t *testing.T) {
tests := []struct {
name string
s TxStatus
want string
}{
{
"TxStatusUnknown",
TxStatusUnknown,
"Unknown TxStatus (0)",
},
{
"TxStatusPending",
TxStatusPending,
"TxStatusPending",
},
{
"TxStatusReplaced",
TxStatusReplaced,
"TxStatusReplaced",
},
{
"TxStatusConfirmed",
TxStatusConfirmed,
"TxStatusConfirmed",
},
{
"TxStatusConfirmedFailed",
TxStatusConfirmedFailed,
"TxStatusConfirmedFailed",
},
{
"Invalid Value",
TxStatus(999),
"Unknown TxStatus (999)",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, tt.s.String())
})
}
}
func TestGasOracleStatus(t *testing.T) {
tests := []struct {
name string
s GasOracleStatus
want string
}{
{
"GasOracleUndefined",
GasOracleUndefined,
"GasOracleUndefined",
},
{
"GasOraclePending",
GasOraclePending,
"GasOraclePending",
},
{
"GasOracleImporting",
GasOracleImporting,
"GasOracleImporting",
},
{
"GasOracleImported",
GasOracleImported,
"GasOracleImported",
},
{
"GasOracleImportedFailed",
GasOracleImportedFailed,
"GasOracleImportedFailed",
},
{
"Invalid Value",
GasOracleStatus(999),
"Undefined GasOracleStatus (999)",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, tt.s.String())
})
}
}
func TestProverTaskFailureType(t *testing.T) {
tests := []struct {
name string
r ProverTaskFailureType
want string
}{
{
"ProverTaskFailureTypeUndefined",
ProverTaskFailureTypeUndefined,
"prover task failure undefined",
},
{
"ProverTaskFailureTypeTimeout",
ProverTaskFailureTypeTimeout,
"prover task failure timeout",
},
{
"ProverTaskFailureTypeSubmitStatusNotOk",
ProverTaskFailureTypeSubmitStatusNotOk,
"prover task failure validated submit proof status not ok",
},
{
"ProverTaskFailureTypeVerifiedFailed",
ProverTaskFailureTypeVerifiedFailed,
"prover task failure verified failed",
},
{
"ProverTaskFailureTypeServerError",
ProverTaskFailureTypeServerError,
"prover task failure server exception",
},
{
"Invalid Value",
ProverTaskFailureType(999),
"illegal prover task failure type (999)",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, tt.r.String())
})
}
}


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.59"
var tag = "v4.3.55"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {


@@ -1,3 +1 @@
declare module "circomlib/src/evmasm";
declare module "circomlib/src/poseidon_gencontract";
declare module "circomlib/src/poseidon_constants";


@@ -1,97 +0,0 @@
/* eslint-disable node/no-missing-import */
/* eslint-disable node/no-unpublished-import */
import { expect } from "chai";
import { randomBytes } from "crypto";
import { BigNumber, Contract } from "ethers";
import { ethers } from "hardhat";
import fs from "fs";
import PoseidonWithoutDomain from "circomlib/src/poseidon_gencontract";
import { generateABI, createCode } from "../scripts/poseidon";
describe("PoseidonHash.spec", async () => {
// test against with circomlib's implementation.
context("domain = zero", async () => {
let poseidonCircom: Contract;
let poseidon: Contract;
beforeEach(async () => {
const [deployer] = await ethers.getSigners();
const PoseidonWithoutDomainFactory = new ethers.ContractFactory(
PoseidonWithoutDomain.generateABI(2),
PoseidonWithoutDomain.createCode(2),
deployer
);
poseidonCircom = await PoseidonWithoutDomainFactory.deploy();
await poseidonCircom.deployed();
const PoseidonWithDomainFactory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
poseidon = await PoseidonWithDomainFactory.deploy();
await poseidon.deployed();
});
it("should succeed on zero inputs", async () => {
expect(await poseidonCircom["poseidon(uint256[2])"]([0, 0])).to.eq(
await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 0)
);
});
it("should succeed on random inputs", async () => {
for (let bytes = 1; bytes <= 32; ++bytes) {
for (let i = 0; i < 5; ++i) {
const a = randomBytes(bytes);
const b = randomBytes(bytes);
expect(await poseidonCircom["poseidon(uint256[2])"]([a, b])).to.eq(
await poseidon["poseidon(uint256[2],uint256)"]([a, b], 0)
);
expect(await poseidonCircom["poseidon(uint256[2])"]([a, 0])).to.eq(
await poseidon["poseidon(uint256[2],uint256)"]([a, 0], 0)
);
expect(await poseidonCircom["poseidon(uint256[2])"]([0, b])).to.eq(
await poseidon["poseidon(uint256[2],uint256)"]([0, b], 0)
);
}
}
});
});
// test against with scroll's go implementation.
context("domain = nonzero", async () => {
let poseidonCircom: Contract;
let poseidon: Contract;
beforeEach(async () => {
const [deployer] = await ethers.getSigners();
const PoseidonWithoutDomainFactory = new ethers.ContractFactory(
PoseidonWithoutDomain.generateABI(2),
PoseidonWithoutDomain.createCode(2),
deployer
);
poseidonCircom = await PoseidonWithoutDomainFactory.deploy();
await poseidonCircom.deployed();
const PoseidonWithDomainFactory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
poseidon = await PoseidonWithDomainFactory.deploy();
await poseidon.deployed();
});
it("should succeed on zero inputs", async () => {
expect(await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 6)).to.eq(
BigNumber.from("17848312925884193353134534408113064827548730776291701343555436351962284922129")
);
expect(await poseidon["poseidon(uint256[2],uint256)"]([0, 0], 7)).to.eq(
BigNumber.from("20994231331856095272861976502721128670019193481895476667943874333621461724676")
);
});
it("should succeed on random inputs", async () => {
const lines = String(fs.readFileSync("./integration-test/testdata/poseidon_hash_with_domain.data")).split("\n");
for (const line of lines) {
const [domain, a, b, hash] = line.split(" ");
expect(await poseidon["poseidon(uint256[2],uint256)"]([a, b], domain)).to.eq(BigNumber.from(hash));
}
});
});
});


@@ -5,9 +5,7 @@ import { concat } from "ethers/lib/utils";
import { ethers } from "hardhat";
import { MockZkTrieVerifier } from "../typechain";
import { generateABI, createCode } from "../scripts/poseidon";
const chars = "0123456789abcdef";
import poseidonUnit from "circomlib/src/poseidon_gencontract";
interface ITestConfig {
block: number;
@@ -22,245 +20,170 @@ interface ITestConfig {
const testcases: Array<ITestConfig> = [
{
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0x5300000000000000000000000000000000000004", ["0x8391082587ea494a8beba02cc40273f27e5477a967cd400736ac46950da0b378"], "0x1111ad"]}' https://rpc.scroll.io
block: 1118637,
desc: "WETH.balance[0xa7994f02237aed2c116a702a8f5322a1fb325b31]",
block: 95216,
desc: "contract with storage",
account: "0x5300000000000000000000000000000000000004",
storage: "0x8391082587ea494a8beba02cc40273f27e5477a967cd400736ac46950da0b378",
expectedRoot: "0x1334a21a74914182745c1f5142e70b487262096784ae7669186657462c01b103",
expectedValue: "0x00000000000000000000000000000000000000000000000000006239b5a2c000",
storage: "0x9505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a",
expectedRoot: "0x2dc794537b959b575dc216cd11389d802f9389fce7183278561a824aa8e950e2",
expectedValue: "0x00000000000000000000000000000000000000000000000111346048bf18a14a",
accountProof: [
"0x0907d980105678a2007eb5683d850f36a9caafe6e7fd3279987d7a94a13a360d3a1478f9a4c1f8c755227ee3544929bb0d7cfa2d999a48493d048ff0250bb002ab",
"0x092b59a024f142555555c767842c4fcc3996686c57699791fcb10013f69ffd9b2507360087cb303767fd43f2650960621246a8d205d086e03d9c1626e4aaa5b143",
"0x091f876342916ac1d5a14ef40cfc5644452170b16d1b045877f303cd52322ba1e00ba09f36443c2a63fbd7ff8feeb2c84e99fde6db08fd8e4c67ad061c482ff276",
"0x09277b3069a4b944a45df222366aae727ec64efaf0a8ecb000645d0eea3a3fa93609b925158cc04f610f8c616369094683ca7a86239f49e97852aa286d148a3913",
"0x092fb789200a7324067934da8be91c48f86c4e6f35fed6d1ce8ae4d7051f480bc0074019222c788b139b6919dfbc9d0b51f274e0ed3ea03553b8db30392ac05ce4",
"0x092f79da8f9f2c3a3a3813580ff18d4619b95f54026b2f16ccbcca684d5e25e1f52912fa319d9a7ba537a52cc6571844b4d1aa99b8a78cea6f686a6279ade5dcae",
"0x09249d249bcf92a369bd7715ec63a4b29d706a5dbb304efd678a2e5d7982e7fa9b202e3225c1031d83ab62d78516a4cbdbf2b22842c57182e7cb0dbb4303ac38c5",
"0x0904837ebb85ceccab225d4d826fe57edca4b00862199b91082f65dfffa7669b90039c710273b02e60c2e74eb8b243721e852e0e56fa51668b6362fd920f817cb7",
"0x090a36f6aabc3768a05dd8f93667a0eb2e5b63d94b5ce27132fb38d13c56d49cb4249c2013daee90184ae285226271f150f6a8f74f2c85dbd0721c5f583e620b10",
"0x091b82f139a06af573e871fdd5f5ac18f17c568ffe1c9e271505b371ad7f0603e716b187804a49d2456a0baa7c2317c14d9aa7e58ad64df38bc6c1c7b86b072333",
"0x0929668e59dfc2e2aef10194f5d287d8396e1a897d68f106bdb12b9541c0bab71d2bf910dea11e3209b3feff88d630af46006e402e935bc84c559694d88c117733",
"0x0914231c92f09f56628c10603dc2d2120d9d11b27fa23753a14171127c3a1ee3dd0d6b9cbd11d031fe6e1b650023edc58aa580fa4f4aa1b30bf82e0e4c7a308bb9",
"0x0914c1dd24c520d96aac93b7ef3062526067f1b15a080c482abf449d3c2cde781b195eb63b5e328572090319310914d81b2ca8350b6e15dc9d13e878f8c28c9d52",
"0x0927cb93e3d9c144a5a3653c5cf2ed5940d64f461dd588cd192516ae7d855e9408166e85986d4c9836cd6cd822174ba9db9c7a043d73e86b5b2cfc0a2e082894c3",
"0x090858bf8a0119626fe9339bd92116a070ba1a66423b0f7d3f4666b6851fdea01400f7f51eb22df168c41162d7f18f9d97155d87da523b05a1dde54e7a30a98c31",
"0x0902776c1f5f93a95baea2e209ddb4a5e49dd1112a7f7d755a45addffe4a233dad0d8cc62b957d9b254fdc8199c720fcf8d5c65d14899911e991b4530710aca75e",
"0x091d7fde5c78c88bbf6082a20a185cde96a203ea0d29c829c1ab9322fc3ca0ae3100ef7cba868cac216d365a0232ad6227ab1ef3290166bc6c19b719b79dbc17fc",
"0x091690160269c53c6b74337a00d02cb40a88ea5eba06e1942088b619baee83279e12d96d62dda9c4b5897d58fea40b5825d87a5526dec37361ec7c93a3256ea76d",
"0x091bccb091cde3f8ca7cfda1df379c9bfa412908c41037ae4ec0a20ce984e2c9a51d02c109d2e6e25dc60f10b1bc3b3f97ca1ce1aa025ce4f3146de3979403b99e",
"0x0927083540af95e57acba69671a4a596f721432549b8760941f4251e0dd7a013a917cee0f60d333cf88e40ae8710fb1fd6e3920346a376b3ba6686a4b2020a043e",
"0x082170b57b8f05f6990eec62e74cdb303741f6c464a85d68582c19c51e53f490000a5029a62ddc14c9c07c549db300bd308b6367454966c94b8526f4ceed5693b2",
"0x0827a0b16ef333dcfe00610d19dc468b9e856f544c9b5e9b046357e0a38aedaeb90000000000000000000000000000000000000000000000000000000000000000",
"0x06126f891e8753e67c5cbfa2a67e9d71942eab3a88cde86e97a4af94ea0dde497821fb69ccdb00e6eaeaf7fc1e73630f39f846970b72ac801e396da0033fb0c247",
"0x0420e9fb498ff9c35246d527da24aa1710d2cc9b055ecf9a95a8a2a11d3d836cdf050800000000000000000000000000000000000000000000000016ef00000000000000000000000000000000000000000000000000000000000000600058d1a5ce14104d0dedcaecaab39b6e22c2608e40af67a71908e6e97bbf4a43c59c4537140c25a9e8c4073351c26b9831c1e5af153b9be4713a4af9edfdf32b58077b735e120f14136a7980da529d9e8d3a71433fc9dc5aa8c01e3a4eb60cb3a4f9cf9ca5c8e0be205300000000000000000000000000000000000004000000000000000000000000",
"0x001988ce414103e97cb80613adde47d5fe0611b30087d1cfcdb84284c42907467e24ac744be2edcb86cdfc42a9bbb7b2a270649161c3ce3a41d3ad5a26927d2c79",
"0x0028db7c407cab6652f1f194401bd87bda33c9a1723b4f93515bd5929cad02668123fa5a3e69136c8e03a62c805f89c9d3578a6f5fac4bb281fc4d7df12fbcc5db",
"0x000376d1bfe3d5c6afffb5707a34003209c57fbf15430daf0f8022b4df2bb947460ab4fda7be343efd34af2420e8e9d4268f436cb7700a005086df4eba083407c8",
"0x0025df09dd66dd9d8b5b1abb82cee9985a2addd12e7f5671c910e27644ccaf498c2a2d7021169172e380831f43a00f0a3bef8576c7c74ac98fd7e7b1ec443ac92e",
"0x00218d51f8e754bf89062007dd765b50b7385bbb4a57db258ac8dcf9ad69b6f4552ddc5a17cec74d8e8f06e16c0a6112023c34d6c001060bc783ab4d06a4a9801a",
"0x001166c2eedfbbb4568ec27c57b2729437c0c8c38161fad643f03f76fbd807e712286d86bfdceb6729daedb6f219dd0f6080386d9a2a8f9c1dcb89792c8754e125",
"0x0028fd666ed406e277f6496bcac13af8b303b58c74be937399095399f4dd141c6f2876f81684c2546ff90b221ba2fe1290e671770af08fd545868e3351401b1503",
"0x000b9245c7ccc1eab305d40cced5e8aac6c8ddb877451075185bb7a6c1a4973a5d2852ce761c8e417a5f604a6ef4196ec101014aa1d1e4c684d1b5b8cbec5c37b1",
"0x0019755e50ef22e13ae17cbc33d9e708ee9efc011941b3a920bc65da9825b04eb029a43488e5584b68d1a98a215f03f31e063734a3305600f9feed11607271d0d3",
"0x002e10cc0afbf5b336e6a6eeae0c863df7a7c2ba61c599618fb973aeff397918e523b18c08a19fa6bc964ae41c56af610ab43d948db94ad2543e9807a5a0f1d2f0",
"0x00247f3f0cebebf749e27c8ffd81e9919cab114bd3d75029e3260e99b6c7fe551d06a69531144f521b68d1a2c7450f5a20146efdaf7b47271782bb8746a023cf84",
"0x0029ad88f0ee7198edcae37ab88efb2a27ea8956d6b988264b227843c175743c4329916ead363e6adfc27f400977d2d9efb1f896616a18d71e2702ec8201b82c57",
"0x002a1de55ee84561850354085516a1101705f8240b8f1e1f9aea3a464650d637a52fad2de438ac5851b0e28508af90bd385dbcad5df8ea23ca78792f094ff7ca0d",
"0x001ba118afa1977f1fda1411cd1c7f145ab97a35b1e724060d5cfc3d58b27141ee2b0a8dbf3d494c6a1bf6456c4de00de8e2f0d9be0716a3ca78d4df28948f975b",
"0x0025bdbf508c1e3808415136bfdd6dfb548c33348d813882b0d405913405d575010c60f95c658dc8113f7c97935a35d78c23dba131c25866fc8d93920e318d2450",
"0x0007bc3ec4d80df884c4d87f4541ffa522046a4c52e6cccb9ff7376ff56149e5d21b87a56676f679f4b8b4478c8a3aa80a09127258cccd4aa373a5c7c2344d2d03",
"0x010aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000004000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
storageProof: [
"0x09240ea2601c34d792a0a5a8a84d8e501cfdfdf2c10ef13ea560acac58661882dd1b3644d1d4f3e32fc78498a7ebeffac8c6a494ac6f36923ef1be476444c0d564",
"0x0912af3ac8f8ea443e6d89d071fccaa2b3c8462220c1c2921234f613b41594f08f2a170e61f5f436b536c155b438044cf0d0f24b94b4c034ad22b3eae824998243",
"0x0916011d547d7a54929c3515078f4f672c6b390ccdd4119f0776376910bc5a38da1a059ed9c504fadcc9f77e8a402175743bee1f5be27b7002b0f6c5b51070452c",
"0x09017285edc268d979eb410b46627e541afda16cdb3577ce04c15dc14cc6609c60143f0c01e71e99b2efbe3d8e62a2c812889aa9fd88dd4b0ed8eadcf1ec9b096a",
"0x0922901e65200b007ad8e1b972e90403b336e459e0cf9b9d68732da345b1b0f6872c9e3f3edacbd857b26d0a66a80aa56c6ebaa9849e9ea5a2b17fd59cabe138e4",
"0x091b77a00164a72880eec6c18fc043fa99f922e20bbee156e1ebfd3a358bee6bbb24d97cfaa234befe197a567476cade91b7d97a1017b8d5286dae4dddadffe1cd",
"0x09216f1c4d67a9a428885bb8d978ad369d2d69d4dcc1692c3a0c3ea05da7d6f0ac2d6dda722e76eb513c67718e7be0478851758be5547322473a53b5b2b67faf95",
"0x091f56c6f18ceb7077125df1ed17a42a85956090594125c1b182161de20f8af6aa2e36977412f9ea2ad2c0951153969eca8408317558ff1b6b4ad731726235f606",
"0x092ca197dda6c519d80296f4fcda2933df9608ec684ad000133259024041d070812d29b058a998cf7ffc647b2739041725d77889f58953799c6aba6d9e5b981fc8",
"0x091c25a87d321a09ad2a149d1a7eaa77727c7feffb4c39caf44f8edd4377f7bd0c16d1091494d3c90d301c1cb4596692798e78e4cc3d53c3a08e2641de43f9da18",
"0x092166058c98245eb85b08da1c569df11f86b00cc44212a9a8ee0d60556d05a8030942c68b535651e11af38264ecc89e5f79b66c3d9ce87233ad65d4894a3d1c3d",
"0x0908c3b13b7400630170baec7448c7ec99fa9100cad373e189e42aca121e2c8f450f9e40d92d98bb0b1286a18581591fddfa8637fc941c1630237293d69e5cb98f",
"0x091362d251bbd8b255d63cd91bcfc257b8fb3ea608ce652784e3db11b22ca86c0122a0068fa1f1d54f313bed9fd9209212af3f366e4ff28092bf42c4abebffe10a",
"0x081d67961bb431a9da78eb976fabd641e20fbf4b7e32eb3faac7dfb5abb50f1faf1438d77000c1cf96c9d61347e1351eb0200260ebe523e69f6e9f334ec86e6b58",
"0x0819324d2488778bdef23319a6832001ee85f578cc920670c81f3645f898a46ec62e00385c4416ca4ccbab237b13396e5e25e5da12101021c6a6f9ecfe7c7fed19",
"0x041421380c36ea8ef65a9bdb0202b06d1e03f52857cdfea3795463653eaa3dd7d80101000000000000000000000000000000000000000000000000000000006239b5a2c000208391082587ea494a8beba02cc40273f27e5477a967cd400736ac46950da0b378",
"0x000a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145b",
"0x001684ff1ef6ea054c5a6a5cae45f9280dacfc10c6cde39d1f64a00ad3c77549fe1c14ff8a628c0244ba48d63610e5d0b514c1b7b60301b6f27f77a435caf8bd60",
"0x001a2ba0ad7d6447d3c2476aa2e6bd04ab552ac1840450ce11f338f58a80fcdf420df4b9fc89108a0a44d844d981abe44d5ab20a5a101d07e94d131f07bf83ba62",
"0x0007158ec8942174c68bde0ab3666eb29b3c5784693bbfcd21126789d98bbdd05409f0313df8ddc438abe4798854f30c9daa2274950ce833a2de21e09b8b2c11b2",
"0x000ab27b84c73e447618f030ad9d621b0d61cc783e7ae5671ffcd3ff479b5093fe173d6126fa71986aa679b5384a2dc25f3a15f806a546e933f9fda6ac0a3460d9",
"0x0024ca9a7c6b7bf77c7a7acdae9d8e551b08ec6adf30abb7d1c45a6bbd5058ea921802170d5cc7de7d294cf6c67b0ac0208fe76497803554fb5bba9f78721568eb",
"0x0018a60c68b26022ced26cce2be1af1d6b33f4c16596d1ba18d5f47fea98ae490b12e66678391e289de1cf981c122e765265b94f0669614d94847480a77c2d3b74",
"0x001a776d5e5902c9a073c86a71ee80d167d6e2eb92150df2afb3d87f18b2cce6f02af158ba1cfbc643b36c1e001b59473cc88663b44c8d70739a27b804ec387146",
"0x0012cd2c1070b0d2eb215eb760fba9b843bd5c732102ce9773701076b0e37a437e136901c4ddc1cdbef42f46af629296ca5965b41a53cce65237612cea27477076",
"0x002bf94aa1fcb474365039e949bbbeabe0162ffc490b1b63ffe0f84bf182a8bf16169fe345e742d176a80f6e733177736d93e40fc9fdd4866efa6cc45ad94e9577",
"0x001a2e6e1b585fa0564fc606c3d62c26d9a113d75430966ff3f500e450c762edeb24fb1e5456ed4313d9418a1b073ae8b3f852f0f8435752bbbe65d21726ddb873",
"0x002529704fb28f7d3f9d2f3e9d38b000b6bfc2a21cb0a1955797016536066307d70ba7397326ecf50b98153f9e3baa96608efdf7d772b1ff28649bef677860dba9",
"0x0022f4f22a1d85ac83a56e7031559cf874c78a2f2ee6b6b93625f588313964a6d0052f6c873c6417d409c2a5317b31449b36fb4faede558d03b448b06b4a198daa",
"0x0017167b295954b29f62d7347dab3158aedc8586d5aa233d3f69c14bc7fe31eb840000000000000000000000000000000000000000000000000000000000000000",
"0x002d7bed0c0f0318a6fc60f903f4a42841cc4fa431ddf1a97fc34f35d6a267434b2a1a818d75328089a9578143e31b1f535517e09ff50a728b100483e712c8bc9a",
"0x0126ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
},
{
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0x5300000000000000000000000000000000000004", ["0x0000000000000000000000000000000000000000000000000000000000000002"], "0x1111ad"]}' https://rpc.scroll.io
block: 1118637,
desc: "WETH.totalSupply",
account: "0x5300000000000000000000000000000000000004",
block: 95216,
desc: "contract with empty storage node",
account: "0xb75d7e84517e1504c151b270255b087fd746d34c",
storage: "0x0000000000000000000000000000000000000000000000000000000000000002",
expectedRoot: "0x1334a21a74914182745c1f5142e70b487262096784ae7669186657462c01b103",
expectedValue: "0x0000000000000000000000000000000000000000000000600058d1a5ce14104d",
accountProof: [
"0x0907d980105678a2007eb5683d850f36a9caafe6e7fd3279987d7a94a13a360d3a1478f9a4c1f8c755227ee3544929bb0d7cfa2d999a48493d048ff0250bb002ab",
"0x092b59a024f142555555c767842c4fcc3996686c57699791fcb10013f69ffd9b2507360087cb303767fd43f2650960621246a8d205d086e03d9c1626e4aaa5b143",
"0x091f876342916ac1d5a14ef40cfc5644452170b16d1b045877f303cd52322ba1e00ba09f36443c2a63fbd7ff8feeb2c84e99fde6db08fd8e4c67ad061c482ff276",
"0x09277b3069a4b944a45df222366aae727ec64efaf0a8ecb000645d0eea3a3fa93609b925158cc04f610f8c616369094683ca7a86239f49e97852aa286d148a3913",
"0x092fb789200a7324067934da8be91c48f86c4e6f35fed6d1ce8ae4d7051f480bc0074019222c788b139b6919dfbc9d0b51f274e0ed3ea03553b8db30392ac05ce4",
"0x092f79da8f9f2c3a3a3813580ff18d4619b95f54026b2f16ccbcca684d5e25e1f52912fa319d9a7ba537a52cc6571844b4d1aa99b8a78cea6f686a6279ade5dcae",
"0x09249d249bcf92a369bd7715ec63a4b29d706a5dbb304efd678a2e5d7982e7fa9b202e3225c1031d83ab62d78516a4cbdbf2b22842c57182e7cb0dbb4303ac38c5",
"0x0904837ebb85ceccab225d4d826fe57edca4b00862199b91082f65dfffa7669b90039c710273b02e60c2e74eb8b243721e852e0e56fa51668b6362fd920f817cb7",
"0x090a36f6aabc3768a05dd8f93667a0eb2e5b63d94b5ce27132fb38d13c56d49cb4249c2013daee90184ae285226271f150f6a8f74f2c85dbd0721c5f583e620b10",
"0x091b82f139a06af573e871fdd5f5ac18f17c568ffe1c9e271505b371ad7f0603e716b187804a49d2456a0baa7c2317c14d9aa7e58ad64df38bc6c1c7b86b072333",
"0x0929668e59dfc2e2aef10194f5d287d8396e1a897d68f106bdb12b9541c0bab71d2bf910dea11e3209b3feff88d630af46006e402e935bc84c559694d88c117733",
"0x0914231c92f09f56628c10603dc2d2120d9d11b27fa23753a14171127c3a1ee3dd0d6b9cbd11d031fe6e1b650023edc58aa580fa4f4aa1b30bf82e0e4c7a308bb9",
"0x0914c1dd24c520d96aac93b7ef3062526067f1b15a080c482abf449d3c2cde781b195eb63b5e328572090319310914d81b2ca8350b6e15dc9d13e878f8c28c9d52",
"0x0927cb93e3d9c144a5a3653c5cf2ed5940d64f461dd588cd192516ae7d855e9408166e85986d4c9836cd6cd822174ba9db9c7a043d73e86b5b2cfc0a2e082894c3",
"0x090858bf8a0119626fe9339bd92116a070ba1a66423b0f7d3f4666b6851fdea01400f7f51eb22df168c41162d7f18f9d97155d87da523b05a1dde54e7a30a98c31",
"0x0902776c1f5f93a95baea2e209ddb4a5e49dd1112a7f7d755a45addffe4a233dad0d8cc62b957d9b254fdc8199c720fcf8d5c65d14899911e991b4530710aca75e",
"0x091d7fde5c78c88bbf6082a20a185cde96a203ea0d29c829c1ab9322fc3ca0ae3100ef7cba868cac216d365a0232ad6227ab1ef3290166bc6c19b719b79dbc17fc",
"0x091690160269c53c6b74337a00d02cb40a88ea5eba06e1942088b619baee83279e12d96d62dda9c4b5897d58fea40b5825d87a5526dec37361ec7c93a3256ea76d",
"0x091bccb091cde3f8ca7cfda1df379c9bfa412908c41037ae4ec0a20ce984e2c9a51d02c109d2e6e25dc60f10b1bc3b3f97ca1ce1aa025ce4f3146de3979403b99e",
"0x0927083540af95e57acba69671a4a596f721432549b8760941f4251e0dd7a013a917cee0f60d333cf88e40ae8710fb1fd6e3920346a376b3ba6686a4b2020a043e",
"0x082170b57b8f05f6990eec62e74cdb303741f6c464a85d68582c19c51e53f490000a5029a62ddc14c9c07c549db300bd308b6367454966c94b8526f4ceed5693b2",
"0x0827a0b16ef333dcfe00610d19dc468b9e856f544c9b5e9b046357e0a38aedaeb90000000000000000000000000000000000000000000000000000000000000000",
"0x06126f891e8753e67c5cbfa2a67e9d71942eab3a88cde86e97a4af94ea0dde497821fb69ccdb00e6eaeaf7fc1e73630f39f846970b72ac801e396da0033fb0c247",
"0x0420e9fb498ff9c35246d527da24aa1710d2cc9b055ecf9a95a8a2a11d3d836cdf050800000000000000000000000000000000000000000000000016ef00000000000000000000000000000000000000000000000000000000000000600058d1a5ce14104d0dedcaecaab39b6e22c2608e40af67a71908e6e97bbf4a43c59c4537140c25a9e8c4073351c26b9831c1e5af153b9be4713a4af9edfdf32b58077b735e120f14136a7980da529d9e8d3a71433fc9dc5aa8c01e3a4eb60cb3a4f9cf9ca5c8e0be205300000000000000000000000000000000000004000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
storageProof: [
"0x09240ea2601c34d792a0a5a8a84d8e501cfdfdf2c10ef13ea560acac58661882dd1b3644d1d4f3e32fc78498a7ebeffac8c6a494ac6f36923ef1be476444c0d564",
"0x0912af3ac8f8ea443e6d89d071fccaa2b3c8462220c1c2921234f613b41594f08f2a170e61f5f436b536c155b438044cf0d0f24b94b4c034ad22b3eae824998243",
"0x0916011d547d7a54929c3515078f4f672c6b390ccdd4119f0776376910bc5a38da1a059ed9c504fadcc9f77e8a402175743bee1f5be27b7002b0f6c5b51070452c",
"0x092293af71b7b9315c32d08f06e291b85e3b3dbba786dd31952369f666281aa21125ab35feae70aaca9349f6af48f7dcf2dee0324e4eae03e929963e7728b633a3",
"0x090607033a4b976c1e4683298d66b88a95ed45033ff43dea0670d84a8c42d35bf12562869385c0e70f561f18be4b78e7276b837f140a45ab12ffef1ba4ad5faecb",
"0x090abc5f713c2f58583114bb5081d00cbd01789d8efbd95e471b151c71c475142f0f52ad30f8a63288eb9dd12aca2a670de08c03f8384f55d730c943e1c472625b",
"0x0905156e8704d6195f6ae562aed2072f4e32422c6dfd4840ca354b9c4d2de5ce760fca52b1e0689ad374bae9fbea262a929f919695149a083fe6bacb806dc02fca",
"0x0917078d4c193a3fdbfe8ce3a235a0e1df89e626b5e91636097e299883fc2447892ad46eefbb27909544fe02c05e29760315749f6ce21c17c52158f5f5616c2dad",
"0x0917d02e5da8bdb969149c9327b247a6aaa479bcda4a03665da5103c10e616d2f40ccabdacdd25b34235d26e50e7af5d8d312a2cafdcadd41cc589a71a322f254c",
"0x090c62f5c476c1def8ed8a8c25ae54581690b39dfab4b0f3f78b93df96f626714328ea922a76a058087563bb5370664e9a1cebe3062f2d904bf5e3a018219d6563",
"0x091e481971f770e587b1f62f1da9ac4687abc5b2a23097fc38332e15ab957ca0ab0ec0a95c15313887e0d2f166c100deaf17f2ce50767680e6e5b2e3068801c0cd",
"0x0911799e186f1bd299dfa08c07404b9d28e2b179fb6ad523f1846872537b6db85f198b573ac1397048258de38b391fcc5e0c86a0f81f4ca607785fb37041ab8b4d",
"0x092053a028cf3bfcdabcb58985efc39f078cb0bcae4439528a0b6fe4b24bbdbd2c019a04a54e9e96077f3c2c39c1602a83387018b6357ea4c28e96764865d1c8f3",
"0x07303fad3e4628ccae4de1adb41996c9f38b22445b6525ff163b4c68cbde275b1a06111cae9b4d17b730d94f589e20c6ae2cb59bf0b40ad05bf58703ee6d46eac4",
"0x0606bc3fca1f1b3c877aa01a765c18db8b0d7f0bc50bd99f21223055bf1595c84d04fdc0fd416d8402fde743d908d032a20af6f2e65cdc6cc289f72c04f1c2476f",
"0x04020953ad52de135367a1ba2629636216ed5174cce5629d11b5d97fe733f07dcc010100000000000000000000000000000000000000000000000000600058d1a5ce14104d200000000000000000000000000000000000000000000000000000000000000002",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
},
{
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0x5300000000000000000000000000000000000004", ["0x0000000000000000000000000000000000000000000000000000000000002222"], "0x1111ad"]}' https://rpc.scroll.io
block: 1118637,
desc: "random empty storage in WETH",
account: "0x5300000000000000000000000000000000000004",
storage: "0x0000000000000000000000000000000000000000000000000000000000002222",
expectedRoot: "0x1334a21a74914182745c1f5142e70b487262096784ae7669186657462c01b103",
expectedRoot: "0x2dc794537b959b575dc216cd11389d802f9389fce7183278561a824aa8e950e2",
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000000",
accountProof: [
"0x0907d980105678a2007eb5683d850f36a9caafe6e7fd3279987d7a94a13a360d3a1478f9a4c1f8c755227ee3544929bb0d7cfa2d999a48493d048ff0250bb002ab",
"0x092b59a024f142555555c767842c4fcc3996686c57699791fcb10013f69ffd9b2507360087cb303767fd43f2650960621246a8d205d086e03d9c1626e4aaa5b143",
"0x091f876342916ac1d5a14ef40cfc5644452170b16d1b045877f303cd52322ba1e00ba09f36443c2a63fbd7ff8feeb2c84e99fde6db08fd8e4c67ad061c482ff276",
"0x09277b3069a4b944a45df222366aae727ec64efaf0a8ecb000645d0eea3a3fa93609b925158cc04f610f8c616369094683ca7a86239f49e97852aa286d148a3913",
"0x092fb789200a7324067934da8be91c48f86c4e6f35fed6d1ce8ae4d7051f480bc0074019222c788b139b6919dfbc9d0b51f274e0ed3ea03553b8db30392ac05ce4",
"0x092f79da8f9f2c3a3a3813580ff18d4619b95f54026b2f16ccbcca684d5e25e1f52912fa319d9a7ba537a52cc6571844b4d1aa99b8a78cea6f686a6279ade5dcae",
"0x09249d249bcf92a369bd7715ec63a4b29d706a5dbb304efd678a2e5d7982e7fa9b202e3225c1031d83ab62d78516a4cbdbf2b22842c57182e7cb0dbb4303ac38c5",
"0x0904837ebb85ceccab225d4d826fe57edca4b00862199b91082f65dfffa7669b90039c710273b02e60c2e74eb8b243721e852e0e56fa51668b6362fd920f817cb7",
"0x090a36f6aabc3768a05dd8f93667a0eb2e5b63d94b5ce27132fb38d13c56d49cb4249c2013daee90184ae285226271f150f6a8f74f2c85dbd0721c5f583e620b10",
"0x091b82f139a06af573e871fdd5f5ac18f17c568ffe1c9e271505b371ad7f0603e716b187804a49d2456a0baa7c2317c14d9aa7e58ad64df38bc6c1c7b86b072333",
"0x0929668e59dfc2e2aef10194f5d287d8396e1a897d68f106bdb12b9541c0bab71d2bf910dea11e3209b3feff88d630af46006e402e935bc84c559694d88c117733",
"0x0914231c92f09f56628c10603dc2d2120d9d11b27fa23753a14171127c3a1ee3dd0d6b9cbd11d031fe6e1b650023edc58aa580fa4f4aa1b30bf82e0e4c7a308bb9",
"0x0914c1dd24c520d96aac93b7ef3062526067f1b15a080c482abf449d3c2cde781b195eb63b5e328572090319310914d81b2ca8350b6e15dc9d13e878f8c28c9d52",
"0x0927cb93e3d9c144a5a3653c5cf2ed5940d64f461dd588cd192516ae7d855e9408166e85986d4c9836cd6cd822174ba9db9c7a043d73e86b5b2cfc0a2e082894c3",
"0x090858bf8a0119626fe9339bd92116a070ba1a66423b0f7d3f4666b6851fdea01400f7f51eb22df168c41162d7f18f9d97155d87da523b05a1dde54e7a30a98c31",
"0x0902776c1f5f93a95baea2e209ddb4a5e49dd1112a7f7d755a45addffe4a233dad0d8cc62b957d9b254fdc8199c720fcf8d5c65d14899911e991b4530710aca75e",
"0x091d7fde5c78c88bbf6082a20a185cde96a203ea0d29c829c1ab9322fc3ca0ae3100ef7cba868cac216d365a0232ad6227ab1ef3290166bc6c19b719b79dbc17fc",
"0x091690160269c53c6b74337a00d02cb40a88ea5eba06e1942088b619baee83279e12d96d62dda9c4b5897d58fea40b5825d87a5526dec37361ec7c93a3256ea76d",
"0x091bccb091cde3f8ca7cfda1df379c9bfa412908c41037ae4ec0a20ce984e2c9a51d02c109d2e6e25dc60f10b1bc3b3f97ca1ce1aa025ce4f3146de3979403b99e",
"0x0927083540af95e57acba69671a4a596f721432549b8760941f4251e0dd7a013a917cee0f60d333cf88e40ae8710fb1fd6e3920346a376b3ba6686a4b2020a043e",
"0x082170b57b8f05f6990eec62e74cdb303741f6c464a85d68582c19c51e53f490000a5029a62ddc14c9c07c549db300bd308b6367454966c94b8526f4ceed5693b2",
"0x0827a0b16ef333dcfe00610d19dc468b9e856f544c9b5e9b046357e0a38aedaeb90000000000000000000000000000000000000000000000000000000000000000",
"0x06126f891e8753e67c5cbfa2a67e9d71942eab3a88cde86e97a4af94ea0dde497821fb69ccdb00e6eaeaf7fc1e73630f39f846970b72ac801e396da0033fb0c247",
"0x0420e9fb498ff9c35246d527da24aa1710d2cc9b055ecf9a95a8a2a11d3d836cdf050800000000000000000000000000000000000000000000000016ef00000000000000000000000000000000000000000000000000000000000000600058d1a5ce14104d0dedcaecaab39b6e22c2608e40af67a71908e6e97bbf4a43c59c4537140c25a9e8c4073351c26b9831c1e5af153b9be4713a4af9edfdf32b58077b735e120f14136a7980da529d9e8d3a71433fc9dc5aa8c01e3a4eb60cb3a4f9cf9ca5c8e0be205300000000000000000000000000000000000004000000000000000000000000",
"0x001988ce414103e97cb80613adde47d5fe0611b30087d1cfcdb84284c42907467e24ac744be2edcb86cdfc42a9bbb7b2a270649161c3ce3a41d3ad5a26927d2c79",
"0x0028db7c407cab6652f1f194401bd87bda33c9a1723b4f93515bd5929cad02668123fa5a3e69136c8e03a62c805f89c9d3578a6f5fac4bb281fc4d7df12fbcc5db",
"0x0006801926f00b574e3a88162d192482fecba9918b77e133dd77587d9efaf5c7861712d244ac8ad4bc0bffe0dbe8ab261865c9a69b4b7769e9c188ec048460ce78",
"0x002f3161746c2c70c7cefb74c07bc16b28bd9011343f5c6f8756471cd0b184601a25d05d5447a572452964b3c20f40ef841bf313c958e82a6923584e20496df67f",
"0x000efef3e3e174da6f3f451b5a2652d2489fff449a217c10841e68e4a15995d6521c4b1552c592020fbc7219c5d67ff00bd630db8102ce5c6ca12bea29b80ba5e5",
"0x0019b4749b17792c0ad9f7b460a0faf35400da9423be38ac5c40e81c805acc72592c0b933e1c25d05db98d98fc4f04b27610b2ee88281126099aed42f27cd96b00",
"0x002b8d563c5041f28afa38da01b6ec9e7278250be79f7f55e2586955e75ab75fad2055ea72cd44209c41c94ddfb980fe5b007b3e997085bc1fe5b514f72f860c05",
"0x001335698617876fcc272740f765d53d53ee511dc9dc33965aaa0a5584f2f0fc02274c435ba9cc0fd5b897350de8cc1837d3a2baaa54ef3f9c66f689f20eddaf1a",
"0x0010f766b8dbe13e3f27f45da3ad7e5c31fd1c11c51f4f892851519182cdc9348921c10d83a16e057f99623dcd68ab28a78e48b655df756245631521d04e85e583",
"0x002bb5fce9df47073438d61ee438d900ab4ab01ac7f053e57c6ffe3e8f1746285016a600e6b7ee90281bbc3bd9b9523a663261cda2208ae98efcf76df8c965fb76",
"0x002cad2eb5194b59d880565b03cd667a842923c1310a60bd818685c8fe4120d86817ee8bfffdb490f78f23d6fb38bb1c27f10f877c5017b8b2c21ad14f23df0eab",
"0x001f064044ca94d6f30ef93ee1bb6ae35450acf1c8f5b113b0f0ff39e4b65cfb9a25141ae7fc30c69000991e65c626c1b12fb76bca02c68f8116d15698a5934b71",
"0x0014382fa3481f424cc33c39f77fd3df54c5951be347c629ab5baec238e46cab050b2b8bec8ebdbc97dd6c0ab867aae5746e51b69b7b8177c96dbc0c4531521d3e",
"0x0011941db7a46d1a3ddbd27a4a57a0ce1865b6e224552b233d9d545745489257f408c8e3a0a147e117dbb89827018a2df52d124cee29e82b15643e4877cabe4d06",
"0x0000d7b8f99e5f148297bf4bf7e5133f87dbdf1932dbb152e0cb14c472c7d26f26146c4f72b903bb98b0855c1ca5bef4bada14a773dcda341d10402004e999d757",
"0x0104eeb1fce36df4d3f6423137af3855d16bc936184295529c58682bb5217d64d905080000000000000000000000000000000000000000000000000867000000000000000130644e72e131a029b85045b68181585d2833e84879b96ea2850beb8e012d423615fd9926356a5b1f3a4599c7cccd6df3b45097b6527756e572b90fc8c40496f831f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb8351cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c220b75d7e84517e1504c151b270255b087fd746d34c000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
storageProof: [
"0x09240ea2601c34d792a0a5a8a84d8e501cfdfdf2c10ef13ea560acac58661882dd1b3644d1d4f3e32fc78498a7ebeffac8c6a494ac6f36923ef1be476444c0d564",
"0x092fa31ba6c9b8f291512a582ab446daf7aa3787e68f9628d08ec0db329027d9001af83d361b481ed4b943d988cb0191c350b8efc85cfceba74afb60783488d441",
"0x092c2ec2d967208cb5088400d826b52113d606435be011b6c9f721f293fb12242515681c9016eb1c222dcdbeeeb9fd3a504caba892f4c1832741a2b17a7305598a",
"0x090c7fe825c29bf5df80c7101ff8a372ba4f7b2ac37c16a3bbda38cc1e38e682460499b7e5d21d3784f496e747140f465eb1a39a019d2be8baf13a5e39f359a4ed",
"0x092bb11ebbc7cd1e565b86498aecab16842ab3fa852c7943cfbc49ee4bc593b2f308a78e1bc555e07d36d5c812af57c18f67199197a52ff74bc4e32ca6b7fadf32",
"0x092fd1e042080801034c6d6c79d462016c74b97dfbb1272cf606e638911a08f21c02434541eeed6d66002c69042f9354211e40518316a2d98cc0da0f19fb1ea013",
"0x09024bd491ec707bc3e8bea6b2754f37b1e85903061aefabd945537eef2f4d38b4136b925b004d29603c5e6195e073322d27f0c6ea3fa1ea5c5b248ff60dda594c",
"0x09269e1f468bd9bbde77a13562645a80a77d26d801781ca95d385bd59ee1b0890b03694bf9043190620265bf0bc3baa4d82cc82302ae0bbf33cfa48b0ec9d5ab25",
"0x0924d8bf62b2a725684847208dc021d5aee9f3c8f14c14786bc9f93232dfd3e068120bb7d022bbb159b4b84bb9e36cd2fcd89d761e265c1b88c8bdb9745a51cb22",
"0x092680f932920fd86de0b417cfdbeb2836a470213097ed5abb1a2b4deba8437f6825fd0ec614b97e6cfa4d50b08ad1e0fd8a5cd72db3a468128d1045d6a54e5e6e",
"0x0909e630914cee4db538057a0218a72288b88b2603aee0f805254b865a03de87c92ce46c1aa77ee8c42bb60c4175826f4dbb89d6282c01ff3de654c961599e66c3",
"0x091a17302d53ad1b7a4472d111fd27b35720d49ce27259b5e42f46339dddf235e82b973c29f44cf69b589f724d7d2fa54bf38b37bde3fc66c0d965a8c10df80caa",
"0x0916572156ae22ae2b0bc84ff41d16668be7163da26db2b13b86c218e0516c97a4131b584b7192464dde26060f66f678b03c8db8f64f1cd7a1f98a22a90cce5850",
"0x092c6ee2ca598c123445bbbd403ca3ab8a95ce2443f941ebdcf7bb035e2a3e38e22e8d5b222a1019b126f0ecf277c7fed881413e879cd4dc5df66634b6e9fb688d",
"0x0700000000000000000000000000000000000000000000000000000000000000002822301c27c0bd26a8f361545a09d509a2feed981accf780de30244f0300321d",
"0x05",
"0x000c180cb3d57f72eb405dfc667d167967e4709cf3722a87b4c924f78a1d8fa9e926d16eb1f4902f8ac7a48fdf98274c9c4061f9f14f783e2fb41ef50c53d5f8ad",
"0x000f78c968ee196c478c91d12a48edfde6c630d40203652c6420ff5aa3619549a4297615606d62866169d509f77c9cb38751ae282cafdc27caf891585b383b4795",
"0x000798716960783afdcfd0749aa3b316d6e3d6ec2724853e629b42b5a9a10208e02e5f5fe3d5b8b823d3481aa1e738a1a24d6d1a63116e0003044672d73a7df2e4",
"0x0014748f61c4954d239225204b4611d66384f08ef03db3da82957fd590ee00b6c92b873e4bd217f8dfb0fa29bca1087ac7bc29db616a6830ba456091bab772ac06",
"0x000a1c900952239e98f5f1a3009e623bf6cf533d3b0d6d13d28d04f0496761927c0be199ff86f081ebb1c413e850450a4cce01dfd2c455156d7abde31385ae2ab8",
"0x00028d4e89bc6ce55b5e6bba0f2f3758dafcdb4722e6c1a06f6faa8bae065bc8ae0644641c0ac696c265b3ec90889e3842c9a7a5902f1a5e807c5767ed49106982",
"0x001e8434bf68ee6077d88efb5449ad286455a522e63a6bce5544cf785b77a5842d041a4e324bc47aa8ae42b56446f687758a8091986b6d760fd283a9e097a64e3a",
"0x00250bc6ba916a2acb3ce53053a88be40b815fa749d144dc709a7a46a08361e83c05b2b5b05f45324ab921e04ae1278371ebe1e092203259f4e5306eb46ad50f8c",
"0x0011c208e2c536c37674b1ecafff0261146c326c939544781da7062bbd0ac2fbca246f5225dc41e9fc17fe531f5bdc3325620e4003b3310a2cf7e31011b19c68a2",
"0x001dc8d4177945ac89a3c61977ed787e50c9d8a0c5d85dd6b1409ec11213b324e6228005b222573db7882205be776a5bd2183944b6fcf63af604e31b9285bd010e",
"0x0014ba74da33d2ca27e3f78bc1bd052c2b92176ce4136df751a8229051de383c2b0c8994f02704420f1f84963281364401d00f6d5aa9b6f52135bd96159c1c3b9b",
"0x00188c7ee45a6c28fa7ad49a86206b70764066b1888b0de90e4410d7132a641f8b0eecbba072e28ed6705379104e30dd2557c47b30be7dd5e8c893b8a641d02701",
"0x0010fb29a3bb8191eb03bd345ad1995bf6a57f09929f72dc8a9c42435c2eef734b1d565bfc8ae78d6c1496f2bdfeadff6890e8ddef4c6b730a5ec8575344800c90",
"0x001b2abe5a1352c492c3ac47d2ff93896977a99a0783eedadc6246efc9b4e78ab408291f4e9234e4662a365f40090e1b323e3448fa2f6cdc9c929477095499c323",
"0x00083b5711eb1cbba5e79c53227057d4987a22dd22b5ef715bf21f558917f48b17027f174fd4ca77e412ca65a7fbf6151e4473fa909ea384c7687b45f860d0103a",
"0x00100158ee54f61ba5b093a43a348cfd202c87ba1533af2b24fc2f068de89a8d15100f3cc72c206d05d44db4272bd67db89bc6e5c86d7c1b03b40395ec4661595c",
"0x002a15c17fcf2a10c6d1bcbd59ae262f80ad33518d499059a668e115045069ef012788a404ba41b5f8a96f0b294d0ba91e65b1bf58eee74adb8e55ca12f22fdccc",
"0x00031177585837e616bc830056a4bd12821c9c779096df361ebe1d77379e96ff9e0000000000000000000000000000000000000000000000000000000000000000",
"0x02",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
},
{
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0x5300000000000000000000000000000000000044", ["0x0000000000000000000000000000000000000000000000000000000000000000"], "0x1111ad"]}' https://rpc.scroll.io
block: 1154766,
desc: "random empty storage in some contract",
account: "0x226D078166C78e00ce5E97d8f18CDc408512bb0F",
storage: "0x0000000000000000000000000000000000000000000000000000000000000001",
expectedRoot: "0x1e5cf13822e052084c315e944ca84f1ef375583e85e1508055123a182e415fab",
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000000",
accountProof: [
"0x09062c633f6d7c7a157025fef8ab1c313a7caadda3a64b23664741f9de3b0478fe27571cf9b45d5f4deddf5f0b5354a613998fdcbe9249bb7cde92fd45513c5a99",
"0x0920d6877efe14060018278754e91682430401880981fec1cd1b63610bed0c1e332a63aca7a8898b01983e2c53a7257310318da444fd6c8b705e488943205301a8",
"0x090f6dadd53bbc0f5fa4fa03961aff0bf252ae335e11c1836253b6bc214d66759010b10d80991219a66f1eb7e07169b4cec4fa74b04edbdc08c3f238dfdf1d2fac",
"0x0921ea10af71b5f3587ff9d42178a151427cbcde37b8bee6575463bf6b83110cca0520d5f97b44e7015453ec16d9c28980d2cec3df5c860eb8a455f49dcfa339be",
"0x092d19cf96a7c129aac6f72f780703a9ef3233fc5124d592baee751a3550dd692a02c962b87efbba5aeea4856c3df29c1ea540e1fbc7a74529d5dc793fe8e490d8",
"0x0922e20a087e600560007189ccc1a159e4fffeb1876a6de3772b7f450793a1c6620ada74791f3ecd25a650701578ef9661c64e75d836c681503e96228974a53903",
"0x0924839671b636ebb56cb9a2860a3edf2a2875774e84dfcf8546135189f808d724260ac8be541ff088a9a1d2468c4c6e2faa793009be553a3cbca003649ee511db",
"0x090cd8140d844f62e44ffe820c1b2b0d4aa8f0518c15ff61759d93d805cb017cb628d5b46a4c4ec0a10eb00155808890925050f7af2279b512c25005d963283262",
"0x0913c0698673b0be011485eba05c61ac41bf14fc960ce5dbb6f5a021809eabbb0e18adaf85a3724e1a644268b845f5014b39e574928b9a01bfcd25d6fe1cf03e8f",
"0x0912c2e7da4b091c52e0012e5c13baf07d9d9daed10a558262d2e700a7c823300e054dce1849561bbeede4368a3be06f5a2bae06bdb1bc2bcefdba84634fd1991c",
"0x090b3e9c665497a0f9c1d3f1448c6d9144a287eb0accf86fea6f443f51986df7130392814f078a19643081787478ec3a010e2757a574877a194136c529813cf7ae",
"0x09249a0e273abe79a0b99a55516e19213191b7f77ef34f8815edc4e1ede8711f7920615adbac1983d844c8a6ed50922562432c13d030069d8b3e92611b4fe39531",
"0x09199575893e55d92fafb3b067130b9b6b5a46e7f6fb2d0af412d12591632dfe961adffb9dd1e7490095aac94bc1fcaeb591f4ba907fe2b882c9f6d8f7ab3a1809",
"0x09259308e9398f029ebbe31a4b353f474622b4c96995b7365c3b13c392fcc3e7001be60286a497a3886aa9cff3ad6a5dc71504078eb7a44c43530b7b33eef4743f",
"0x090709a21aaf18a1eaea3b925ab36f47a82095aa3e9ddbc4f01463005c4b64f6af0554d854637fcbfd9b1a4c2474de343950569e4f855d66f2ee14fcfb19ee17f5",
"0x092d7319be75a70b8ea5f0acc6ab4a96971ec546f72b18bdc3e905ad6ea8a288f70626499aee389335559b1dd3cc8b6711f9fde0c517236190cba24fa87993877a",
"0x09081b165a51e3081fc2e3e27d6fdb81134b65284851798de62899db3065a8c1fc040c8dce92508a510c2c34fc2949910dd41247c9f247cd216c03d9bb9d2881b4",
"0x092a27c5be32e1ab6e85d1ac094bc1509d92285f45c63fca6dba9b14d485a94af326d44c1ff85666a4790182ddd7e51cbbe06af81d62082e6d79faec29a4501369",
"0x091a46df6ffd6b439ffcd1b57e9548f5c4db26ade9e984efc8a91a01ab22134d3c1617b504ac2015793c5dac16d379b5ca6cb70c14243491bb68535ee686a3a553",
"0x08180e90f9f9a4fd8065a5849539793bd9e9340b69770eff1716a733241e454c341641f913f1c32e2c652b876f902e5c2c8d51c482411ec44dae969bdc50264c42",
"0x06273c162ecb059cd86ec0a01033dd61c39f59ee0a13eb41a28c0b2d49a45f6f94081be344adea9f54587a832b9efef6fc9ec010d86ec5fb2b53b5ff8dbabc4924",
"0x040b792f5b15327fc37390341af919c991641846d380397e4c73cbb1298921a546050800000000000000000000000000000000000000000000000000fb0000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000be74cc05824041ef286fd08582cdfacec7784a35af72f937acf64ade5073da10889249d61c3649abf8749bf686a73f708d67726fada3e071b03d4541da9156b20226d078166c78e00ce5e97d8f18cdc408512bb0f000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
storageProof: [
"0x05",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
},
{
// curl -H "content-type: application/json" -X POST --data '{"id":0,"jsonrpc":"2.0","method":"eth_getProof","params":["0xC73BfBD94fb1FD860997D4E76D116BDE0333BeEf", ["0x0000000000000000000000000000000000000000000000000000000000000000"], "0x2a7531"]}' https://sepolia-rpc.scroll.io
block: 2782513,
desc: "contract with only one storage entry",
account: "0xC73BfBD94fb1FD860997D4E76D116BDE0333BeEf",
block: 95216,
desc: "contract with no storage",
account: "0x9c0fc47d9346e2be1e24f6cef76149779fe52715",
storage: "0x0000000000000000000000000000000000000000000000000000000000000000",
expectedRoot: "0x13c6008daf17807163a056504e562d4adf13870306814b1a3877cda5297d5ae9",
expectedValue: "0x000000000000000000000000000000000000000000000000000000000000000c",
expectedRoot: "0x2dc794537b959b575dc216cd11389d802f9389fce7183278561a824aa8e950e2",
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000000",
accountProof: [
"0x09272d92cb48d19e41ef64be1da3e10026eb87d227132becb4fba0dd1451783de425f66c55ff0bec0b012e11d64aaaa6c322566d58cf45525cb05302132518f23d",
"0x0920908000907fe2260e41f8682510eee0572937459163ea1940c2eae8b2d5862e015e7c84f56f5948bfc9c242506d14f5c3c1b97bba1b262b40b108f5d7e69287",
"0x09078402c38a02e2b3dda819b761ca6448029f3dd42ae7876ac0dba0d762e3ddb818d80485f0a15f54f110aad9a98b00bdf9ccb56bbcb069552c7f6c10be1b9c15",
"0x09123243fe438606648fe3bcef5eb0165920315fb2b9316ce3ec0daac885577f190b84d9901fc150f52ed177f23ec31de4254b293c6eac2088009f3e13e3a08b78",
"0x09053c59663d3eafad212f58c4834090db4bfd0ba2b13d3108e0acade089a5da9229a75e0b30abc41d4fb252faf9f3aa8ef750b780247d83186cdc333635c25038",
"0x09163255ef0b1fdec7ec97c4e002cdeb6c963ca26d9d03ebdf78eb44dfdb57e4bd1fa9f68cc583c1e7019cc62133ede53e5636330de9a2c09e75f03760026e3729",
"0x09296d3cb1c4fd539ed015f2853649d20f5db111ce13c30b7e6efa4c9468741d1e0eea62adcf73aa5bdb4868cd776df429d26787f424beeda38f4ad19aa83e43e4",
"0x0908288df27fa423895de38ec5a52e809d99b683c5b32463501f5dad642b71387f0a3d37ae9df53b5cfdda0ac67765662e8a71a19b05d38f4a464596e129a35570",
"0x091a774fef4e8294fcca57d213846e51bfcf71249680e937e14248e532b47abd762ad72878f07f4abbba8bd13da9b75f681f35a748bb8fc078913e16a91bce783e",
"0x092799a146ba6b2bf4b6a24aef88c9590d9643d53f429438e348518a17af3d6e8d10e3b39898c3795c9386518438465581ca232445532fb549a8bddbdd6f4e0eed",
"0x0914c654d53c9f8656b784709decbd12ba786800a77c929f3b4255d59138b42dff282005f8997b73d64eeb112775885c4c08d2ee4e356cc2db58154dde48a0a1e4",
"0x091c71601a71f28ed0f6aeb59cf8a7bf29ce7dd3203352099920086e02007496260b811e85a0cd244d56f199b357d5c3a54f897fea21637698943679d07b33db8d",
"0x092a66de31cef7b4b195772a2b96edba3ca7d97a5bbe974d071c37f0d0ca0545be0be9ca0dd4c9d62ec3ba0a404713fefe6b62391ba3f6d283a47e83fdb18c3a4e",
"0x09093842042d196ae30784d31ed1526dd5d60cabe292eb5333e42936a2edbbaf1d237998efa424724063547c02cfa835ebfc24131315c34229b363f46fefda33ee",
"0x0911637da97122f89f421a4564d5328893ff4b5de123aecad06e07ea45d9622b87096a296e974b5eda0f2d35cb5531c4a55f3c1e181a8bb4a0b33399e7c93853d4",
"0x0921feeaba62a4ad78791d662737a3fa52a527dcd892f5d3af2cfbed4b6591d50f2fae639afb8ab4640a2d166616a4803442b26b9a8f5148a1c88adda1e2d911da",
"0x090ddbe424e9368f262ef80a144383fc4f518b27200f7a61a996a075b3b84ab5041c755907f230eea27d060fa827a5743c7046cd0dc7487047bc3a7d222d65d2d7",
"0x092d6e65349fd6751353b4b72fdd03d3ee4b1721efb434db76679c1c348b60fdc0177c7d961201138c98f85daf8a49b7a083a41e77dcd819d359b3db55c4a941a9",
"0x090b0d48518cb602b73a86bd7b2294d401e6ad4851e3c7549fc7d23eea017eadd72e3245236b50c7f256de16bae063df6221b8331443c9d3a79e867fd77dd85cee",
"0x07062bf32f202ec2afa65dfa84fffc76b5c05309768078544920f9b56d021606ce0b7371683425d088ad37f063ee847a9accac416314f1308cce69a8beeb2d2ab7",
"0x090ffc989b8556e69159e246cb74cf7a2e30df63e9b7dba76ede73996ab60d9799063ca19e1d436cea189d17c5d93b8da0fa11b3ee88de1030602d1e8087cbb3da",
"0x070000000000000000000000000000000000000000000000000000000000000000084f906a52b7da7bf35f3cc2431b40cfb90884c2ec0b579c9c096aea959509f7",
"0x0620b6c0072d699768c0b52df46b97dae979a14788ed54dad1d7ce67db6e036a07291784b726760c2d728e4084d95df6d1534e27284c8ae2eeb56a80210f37da2b",
"0x041245637ec55bae3c02f990e3cc3bf59cc05f515731cfa59ee55f8164953f8965050800000000000000000000000000000000000000000000000000ac000000000000000100000000000000000000000000000000000000000000000000000000000000000f68a43f5508e9c1f845406d9a507b612f97530746e59b93c8705f1a7cb0b93451e52f95aea13b1bc1f37dfbf797bfe7cea82a8c82da148f507e1ef2036fea8314b9fb07c4311e129d72b858c37b6bbe09c616f78416cb53d6e83360aff7b99c20c73bfbd94fb1fd860997d4e76d116bde0333beef000000000000000000000000",
"0x001988ce414103e97cb80613adde47d5fe0611b30087d1cfcdb84284c42907467e24ac744be2edcb86cdfc42a9bbb7b2a270649161c3ce3a41d3ad5a26927d2c79",
"0x0007af09eec4d7cc8903e99bd9fb9b8e57c30b0c3e34b17da769b01a9a1b943f391c4537228dbbfd7e7cce02123416bfdd61fb83577725516123b569eafcd8087d",
"0x0013a22efa6a843f6de1925fce2f6a83c7ed307182b16f661d0e7a8046561999393050830a440d2506adf42ccedece4e3aadc6bc80cea20fc1d8ed9e9c61597da0",
"0x001056a19427eac81b91de5db696812b3a0384bf41b37a12e9cbb7dc62404a102a1465c13c8d3721e137a64d9e5ba1267ac418339b3648bfab5a2a86f2343c2b4d",
"0x000794d2c0e19bc86772c2d1a43d46de87ad221847bddcfdffa19dbd34f3c3a9b507c5f198eb63c18640af5eff5480830147639cec070d276b778f21677d22ce32",
"0x000b23d93f98ec6e3536ffcab6afc6e2eb9b73aeb573d288723350366c05469e2e23837ffea9235351ee533af680d14011825e098f81ce3f8f59e9f08deff05e3d",
"0x002ad200ac8be8275ef12b8aeaec526d2b5255128968a2cd2ff775cab14e2ec4e907f2e9b849239e0e94332a50ac9d97320c56ca718c5e023cacd69f80b4c97c86",
"0x00284be135a2d7f5822a7949189b90696df39b1b183206c764576bf457df4fd1560204a9fc6c0dc199eecb404acfcabf4a633916fc94d2790dcd34959809c2195d",
"0x00270c2cd154aea3b575a1c7d47c62576bbdce6bbc7ccf5682e7962cf6cb77f0d317fdbac10917644860584c3057c750df695f529189f90910c30f114257719990",
"0x00174956df87889921e2a6ddb257fa84508fd7ea22c5a622b84378678e781a2289053dc6b3c4f91335b64f4b170bfe70bb5e2e316227b329d2b1205e7c62c4f755",
"0x002f9284ded18b8f281841094a93cb55b95884eec55d8eaa759c6175ddb2e037111c63bcee8ccf544fff55c3e502270e574d1f0b6265c4c7c6f42db5061b0120db",
"0x00065fdf05e66407d26a36a49d042c9c5e8cebab3baa2d3fd1ae6e673c3636cf7e2d9dbf3781e3f26f06fb503638a8bf00882f58dc83500338df4b7e08a290a5fb",
"0x00138987046c770f02f5d8e7d073f6c055536450fa55ccd2a23957598b6070297926f3a0b645072c5bd5c15cdcf03a4474e94d760e3a76fb8714b20b9d74608823",
"0x00280e7f8e278e02e43843aaba5a9a722a89af0ece06b5892284f825974e1c1984185be1fda9b5322a4c41023127eee438849ea23390e6c2d4d9abdedb5a1a43fc",
"0x00208f32072c6e20863710406ad34339da1124c639941e935818dd9ad9419849c91e0e37873df7eb190a2846789df889bbfd522200e2a41423ff9ab0acf2592be0",
"0x0005fb23491fabbc9b3eead71117b86a27952e8fd4b3380336ac3f479832e94bad109a1b6dca757696b8831d2529ffda29f37af36f92fec738376df77561491083",
"0x0028baee42b4a9a70b7ec1e50ea1a6817f812082a28598dca106aaecf2761fb63c06e5b589490c27f5cfc233890456ec47a7365ff2882a27c73968f4829d011b05",
"0x001708247f7a96b84cad27c31985cd39b6cc9435b4ec3f4db9aeed3311c213de651e2f271ae0fa011e5e6fccd121492400327efb915c95d85956a9cd27ceb4321a",
"0x0000000000000000000000000000000000000000000000000000000000000000002332e856217b3bab09901f1daa9ddc91edf56964e03675d260d00ffdf6e2e715",
"0x000571dce6fee951f457db89bae18abbd78b6b06504602a103133d2a38cabf5f5b1ecb13b03e3493e217c65da70baf4c4fad74808110658924869ba0e75d0871db",
"0x001738a6461148300d30699edb55d3b5bb62760aeb9384c07d61aa062c401f3a7d0000000000000000000000000000000000000000000000000000000000000000",
"0x000c14152707412177bbe1cfed882d7d7bdfca4e96be701a3c41bb3d254491f0bf0096ebc25015b9a40d4fe7490bda8ecb7f3a01e858d7833dce8f1993be4db07d",
"0x0117294cb69b0984b3a26d77eae252f9d8e438808bf276ee8c0c0546b7316c9bca05080000000000000000000000000000000000000000000000000ab1000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ed3aa0dd2cd363d4cea5d5283ec359f75be36a12ceddc7f80a58af9d39a418a02b6a0ff9eb34bf0e52f67047f95556a96c4f40822412da0c8bd0340996a754f4209c0fc47d9346e2be1e24f6cef76149779fe52715000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
storageProof: [
"0x041d3c5f8c36e5da873d45bfa1d2399a572ac77493ec089cbf88a37b9e9442842201010000000000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000000",
"0x02",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
},
{
block: 95216,
desc: "EOA with balance",
account: "0x0384a6f7e2588bb251688f9ab8d10932a98e9f28",
storage: "0x0000000000000000000000000000000000000000000000000000000000000000",
expectedRoot: "0x2dc794537b959b575dc216cd11389d802f9389fce7183278561a824aa8e950e2",
expectedValue: "0x0000000000000000000000000000000000000000000000000000000000000000",
accountProof: [
"0x001988ce414103e97cb80613adde47d5fe0611b30087d1cfcdb84284c42907467e24ac744be2edcb86cdfc42a9bbb7b2a270649161c3ce3a41d3ad5a26927d2c79",
"0x0028db7c407cab6652f1f194401bd87bda33c9a1723b4f93515bd5929cad02668123fa5a3e69136c8e03a62c805f89c9d3578a6f5fac4bb281fc4d7df12fbcc5db",
"0x0006801926f00b574e3a88162d192482fecba9918b77e133dd77587d9efaf5c7861712d244ac8ad4bc0bffe0dbe8ab261865c9a69b4b7769e9c188ec048460ce78",
"0x002f3161746c2c70c7cefb74c07bc16b28bd9011343f5c6f8756471cd0b184601a25d05d5447a572452964b3c20f40ef841bf313c958e82a6923584e20496df67f",
"0x0007602275f17f6c339ec3febc879c2ca72efa782ff1888b04553f82333eb0e60c068c8e4fe6da32f7f80a4acb50b690a7204581e5e4b8e9e7daa115dfcb466ae1",
"0x000cb512d4ab158b5e7d5852cc7531788f11e64e5959cc1233d7a64eaaca36426116fea9120cf06c241843db50d81978b402281dfe15ba7d8a8c689bfbe0b31a1a",
"0x002eb4fff0642f7be6d8e95793d9371d606df48efd0b62a7eb01b0a9669307be2b0ee7d01463afc3dac441f66e675ba06fec67b692e3f7a46510d096836468a3cb",
"0x0003ea09dc5b0ca3ce2961d3200c09b837ea535447e3ba45e5583dbb4e9db48b2208abfec237c907584104b11444f55fa3fa7e6f6a5954817ecea6361516f0271b",
"0x001c654478a700ac0414f5cd8da557e04f9570939802c3963e801523f001ebb4d916d301b50f89760520da2a662b03a207e9372902153ba84ef0f5438472f466c6",
"0x0009f3b0d95ec5d88cfc2db19520f43d110d12c757a58ae7f578095de96e5d319d2c8f43a67b0c01008670f07eb53071b835f19cbb45d6e76281a083087217d988",
"0x000348f024d617f64de7be803547c109b98f833b090e8a3dea0c2bed201ce752c12a4fb71f098941741c42e156651d8a42632e3acbf6f14cd9763b50216af75d61",
"0x0029f85b49319fe7dfced69a258b1baf213d638fe3082b9a13f38e553e9d3269333054c4cb6d1e91bc2dfced1559b58cd6474ac6583a1fc5a2bef5eaa7b96ecea0",
"0x000a4d19e2ec5f98d9ccdc1e94d9334668b87ea451195f9a8319b98cfdb077c5ce1adc64852505188363c7e98b83501e876862d8ffbd8b4051f3cb6dde7f0e8afe",
"0x002568d5d87f19b2b3f2b7341ee61fb45f56dc76734beaa4f1a9865b80b9d9a7d500a191ba054a28841f25c34ad384817a2af2ebada6047517dbb2b6a1338e48c7",
"0x0027f6df1a3610c7447efd280fa6a949713456a1ba79b50dc7fb87c5cb3312b19311b3c9c4420874b02bdc1ea102dc77bb803c1a5042d565aea99054ae0eb816b2",
"0x0018a3d33e2c0d076ca4ddb093516d90cb8ba8b508e8d372d3a8a93aa9eef6079b138df6cb61c8f92dcbea8cd90ead1efa49f3a24f814c88a7bdca8fd83f4d0675",
"0x00268f3122e558d5084a1b3ffc293b67bd2436152fbee80566226d4a753b5b44c40b6d06e2f5f17009a7e146889c2f492b077a462d602e0e72f53373a154aa450e",
"0x0006c81bc9375fe1a0ebb75b151c8a321b85970c1a8a5aa7396a7076a4d6f26c8118a7e9e0987d7c6d0100180c9ba496db2b967f6acf7bc11d002314693416b3bf",
"0x011fb221b659992b8d98a645cb37666f934ded70f1f5d82dad67dace71d7191f8105080000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000d8d6f2b3da41cda2e0000000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4702098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864200384a6f7e2588bb251688f9ab8d10932a98e9f28000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
storageProof: [
"0x02",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
],
},
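The fixtures above are raw `eth_getProof` responses captured from the Scroll RPC endpoints; the curl comments record the exact JSON-RPC calls. A minimal sketch of reproducing one, assuming an ethers v5 environment (the parameters are copied from the curl comment earlier in this file):

```ts
import { ethers } from "ethers";

// Replays the eth_getProof call from the curl comment above; the response's
// accountProof/storageProof arrays are what these fixtures embed verbatim.
async function fetchFixture() {
  const provider = new ethers.providers.JsonRpcProvider("https://rpc.scroll.io");
  const res = await provider.send("eth_getProof", [
    "0x5300000000000000000000000000000000000044",
    ["0x0000000000000000000000000000000000000000000000000000000000000000"],
    "0x1111ad", // hex block number
  ]);
  console.log(res.accountProof.length, res.storageProof[0].proof.length);
}
```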
@@ -272,9 +195,13 @@ describe("ZkTrieVerifier", async () => {
beforeEach(async () => {
const [deployer] = await ethers.getSigners();
const PoseidonHashWithDomainFactory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
const Poseidon2Elements = new ethers.ContractFactory(
poseidonUnit.generateABI(2),
poseidonUnit.createCode(2),
deployer
);
const poseidon = await PoseidonHashWithDomainFactory.deploy();
const poseidon = await Poseidon2Elements.deploy();
await poseidon.deployed();
const MockZkTrieVerifier = await ethers.getContractFactory("MockZkTrieVerifier", deployer);
@@ -282,17 +209,6 @@ describe("ZkTrieVerifier", async () => {
await verifier.deployed();
});
const shouldRevert = async (test: ITestConfig, reason: string, extra?: string) => {
const proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
extra || "0x",
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).to.revertedWith(reason);
};
for (const test of testcases) {
it(`should succeed for block[${test.block}] desc[${test.desc}] account[${test.account}] storage[${test.storage}]`, async () => {
const proof = concat([
@@ -308,277 +224,193 @@ describe("ZkTrieVerifier", async () => {
});
}
it("should revert, when InvalidBranchNodeType", async () => {
it("should revert, when parent node invalid", async () => {
const test = testcases[0];
for (const i of [0, 1, test.accountProof.length - 3]) {
const correct = test.accountProof[i];
const prefix = correct.slice(0, 4);
for (let b = 0; b < 16; ++b) {
if (b >= 6 && b < 10) continue;
test.accountProof[i] = test.accountProof[i].replace(prefix, "0x" + chars[b >> 4] + chars[b % 16]);
await shouldRevert(test, "InvalidBranchNodeType");
test.accountProof[i] = correct;
}
}
test.accountProof[0] =
"0x010a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145b";
const proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid parent node");
for (const i of [0, 1, test.storageProof.length - 3]) {
const correct = test.storageProof[i];
const prefix = correct.slice(0, 4);
for (let b = 0; b < 16; ++b) {
if (b >= 6 && b < 10) continue;
test.storageProof[i] = test.storageProof[i].replace(prefix, "0x" + chars[b >> 4] + chars[b % 16]);
await shouldRevert(test, "InvalidBranchNodeType");
test.storageProof[i] = correct;
}
}
test.accountProof[0] =
"0x000a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145b";
test.storageProof[0] =
"0x010a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145b";
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid parent node");
});
it("should revert, when BranchHashMismatch", async () => {
it("should revert, when hash mismatch", async () => {
const test = testcases[0];
for (const i of [1, 2, test.accountProof.length - 3]) {
const correct = test.accountProof[i];
for (const p of [40, 98]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.accountProof[i] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "BranchHashMismatch");
test.accountProof[i] = correct;
}
}
}
for (const i of [1, 2, test.storageProof.length - 3]) {
const correct = test.storageProof[i];
for (const p of [40, 98]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.storageProof[i] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "BranchHashMismatch");
test.storageProof[i] = correct;
}
}
}
test.accountProof[1] =
"0x0028db7c407cab6652f1f194401bd87bda33c9a1723b4f93515bd5929cad02668123fa5a3e69136c8e03a62c805f89c9d3578a6f5fac4bb281fc4d7df12fbcc5dc";
const proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Hash mismatch");
});
it("should revert, when InvalidAccountLeafNodeType", async () => {
it("should revert, when invalid proof magic bytes", async () => {
const test = testcases[0];
const index = test.accountProof.length - 2;
const correct = test.accountProof[index];
const prefix = correct.slice(0, 4);
for (let b = 0; b < 20; ++b) {
if (b === 4 || b === 5) continue;
test.accountProof[index] = test.accountProof[index].replace(prefix, "0x" + chars[b >> 4] + chars[b % 16]);
await shouldRevert(test, "InvalidAccountLeafNodeType");
test.accountProof[index] = correct;
}
test.accountProof[17] =
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704448";
const proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid ProofMagicBytes");
});
it("should revert, when AccountKeyMismatch", async () => {
it("should revert, when invalid leaf node in account proof", async () => {
const test = testcases[0];
const index = test.accountProof.length - 2;
const correct = test.accountProof[index];
for (const p of [4, 10]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "AccountKeyMismatch");
test.accountProof[index] = correct;
}
}
// Invalid leaf node in account proof
test.accountProof[16] =
"0x000aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000004000000000000000000000000";
let proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid leaf node");
// Node key mismatch in account proof
test.accountProof[16] =
"0x010aef16efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000004000000000000000000000000";
proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Node key mismatch");
// Invalid leaf node hash in account proof
test.accountProof[16] =
"0x010aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071e0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000004000000000000000000000000";
proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid leaf node hash");
// Invalid KeyPreimage length in account proof
test.accountProof[16] =
"0x010aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681215300000000000000000000000000000000000004000000000000000000000000";
proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith(
"Invalid KeyPreimage length"
);
// Invalid KeyPreimage in account proof
test.accountProof[16] =
"0x010aef26efde9e4bca477d460482bce3de3577f6e9a280dea6d3f9985b4151deab0508000000000000000000000000000000000000000000000000071d0000000000000000000000000000000000000000000000000000000000000013328350573dd32b38291529042b30b83bf20bfc7e18ab6a9755e2ea692d5a7644f896b0d629cf9740d72ccbc90dd6141deb3fab132f1ebc17ab963c612c7123d5a524d0158cc8291b081281272d79459760d885ea652024615d55b114b5872571b21aee99977b8681205300000000000000000000000000000000000003000000000000000000000000";
proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid KeyPreimage");
});
it("should revert, when InvalidAccountCompressedFlag", async () => {
it("should revert, when storage root mismatch", async () => {
const test = testcases[0];
const index = test.accountProof.length - 2;
const correct = test.accountProof[index];
for (const replaced of ["01080000", "05010000"]) {
test.accountProof[index] = test.accountProof[index].replace("05080000", replaced);
await shouldRevert(test, "InvalidAccountCompressedFlag");
test.accountProof[index] = correct;
}
test.storageProof[0] =
"0x000a52b818e0a009930d62c17f2b1244179b7c14f8e1ae317fb3bfd3a3ba6060031b2a4aa2df31e79f926474987eea69aab84f4581cfd61b0338438110f6be145c";
const proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Storage root mismatch");
});
it("should revert, when InvalidAccountLeafNodeHash", async () => {
it("should revert, when invalid leaf node in storage proof", async () => {
const test = testcases[0];
const index = test.accountProof.length - 2;
const correct = test.accountProof[index];
for (const p of [80, 112, 144, 176, 208]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "InvalidAccountLeafNodeHash");
test.accountProof[index] = correct;
}
}
// Invalid leaf node in storage proof
test.storageProof[15] =
"0x0026ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a";
let proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid leaf node");
// Node key mismatch in storage proof
test.storageProof[15] =
"0x0136ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a";
proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Node key mismatch");
// Invalid leaf node hash in storage proof
test.storageProof[15] =
"0x0126ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111446048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a";
proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid leaf node hash");
// Invalid KeyPreimage length in storage proof
test.storageProof[15] =
"0x0126ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a219505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b96a";
proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith(
"Invalid KeyPreimage length"
);
// Invalid KeyPreimage in storage proof
test.storageProof[15] =
"0x0126ae15b478408eb45ea8b6f61aad1345f2b6257efd1acc4a6024b26f664c98240101000000000000000000000000000000000000000000000000000111346048bf18a14a209505174b0709a2a1997fe9797cb89648a93f17ce0096cbc1a6ed52b73170b97a";
proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Invalid KeyPreimage");
});
it("should revert, when InvalidAccountKeyPreimageLength", async () => {
it("should revert, when proof length mismatch", async () => {
const test = testcases[0];
const index = test.accountProof.length - 2;
const correct = test.accountProof[index];
for (const p of [396, 397]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "InvalidAccountKeyPreimageLength");
test.accountProof[index] = correct;
}
}
});
it("should revert, when InvalidAccountKeyPreimage", async () => {
const test = testcases[0];
const index = test.accountProof.length - 2;
const correct = test.accountProof[index];
for (const p of [398, 438]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "InvalidAccountKeyPreimage");
test.accountProof[index] = correct;
}
}
});
it("should revert, when InvalidProofMagicBytes", async () => {
const test = testcases[0];
let index = test.accountProof.length - 1;
let correct = test.accountProof[index];
for (const p of [2, 32, 91]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.accountProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "InvalidProofMagicBytes");
test.accountProof[index] = correct;
}
}
index = test.storageProof.length - 1;
correct = test.storageProof[index];
for (const p of [2, 32, 91]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "InvalidProofMagicBytes");
test.storageProof[index] = correct;
}
}
});
it("should revert, when InvalidAccountLeafNodeHash", async () => {
const test = testcases[0];
const correct = test.storageProof.slice();
test.storageProof = [
"0x05",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449",
];
await shouldRevert(test, "InvalidAccountLeafNodeHash");
test.storageProof = correct;
});
it("should revert, when InvalidStorageLeafNodeType", async () => {
const test = testcases[0];
const index = test.storageProof.length - 2;
const correct = test.storageProof[index];
const prefix = correct.slice(0, 4);
for (let b = 0; b < 20; ++b) {
if (b === 4 || b === 5) continue;
test.storageProof[index] = test.storageProof[index].replace(prefix, "0x" + chars[b >> 4] + chars[b % 16]);
await shouldRevert(test, "InvalidStorageLeafNodeType");
test.storageProof[index] = correct;
}
});
it("should revert, when StorageKeyMismatch", async () => {
const test = testcases[0];
const index = test.storageProof.length - 2;
const correct = test.storageProof[index];
for (const p of [4, 10]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "StorageKeyMismatch");
test.storageProof[index] = correct;
}
}
});
it("should revert, when InvalidStorageCompressedFlag", async () => {
const test = testcases[0];
const index = test.storageProof.length - 2;
const correct = test.storageProof[index];
for (const replaced of ["00010000", "01000000"]) {
test.storageProof[index] = test.storageProof[index].replace("01010000", replaced);
await shouldRevert(test, "InvalidStorageCompressedFlag");
test.storageProof[index] = correct;
}
});
it("should revert, when InvalidStorageLeafNodeHash", async () => {
const test = testcases[0];
const index = test.storageProof.length - 2;
const correct = test.storageProof[index];
for (const p of [100, 132]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "InvalidStorageLeafNodeHash");
test.storageProof[index] = correct;
}
}
});
it("should revert, when InvalidStorageKeyPreimageLength", async () => {
const test = testcases[0];
const index = test.storageProof.length - 2;
const correct = test.storageProof[index];
for (const p of [140, 141]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "InvalidStorageKeyPreimageLength");
test.storageProof[index] = correct;
}
}
});
it("should revert, when InvalidStorageKeyPreimage", async () => {
const test = testcases[0];
const index = test.storageProof.length - 2;
const correct = test.storageProof[index];
for (const p of [142, 205]) {
const v = correct[p];
for (let b = 0; b < 3; ++b) {
if (v === chars[b]) continue;
test.storageProof[index] = correct.slice(0, p) + chars[b] + correct.slice(p + 1);
await shouldRevert(test, "InvalidStorageKeyPreimage");
test.storageProof[index] = correct;
}
}
});
it("should revert, when InvalidStorageEmptyLeafNodeHash", async () => {
const test = testcases[0];
const index = test.storageProof.length - 2;
const correct = test.storageProof[index];
test.storageProof[index] = "0x05";
await shouldRevert(test, "InvalidStorageEmptyLeafNodeHash");
test.storageProof[index] = correct;
});
it("should revert, when ProofLengthMismatch", async () => {
const test = testcases[0];
await shouldRevert(test, "ProofLengthMismatch", "0x0000");
const proof = concat([
`0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
...test.accountProof,
`0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
...test.storageProof,
"0x00",
]);
await expect(verifier.verifyZkTrieProof(test.account, test.storage, proof)).revertedWith("Proof length mismatch");
});
});
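The tests above all assemble the calldata blob the same way: a one-byte node count, the account-proof nodes, a second one-byte count, then the storage-proof nodes, plus optional trailing bytes to trip the length check. A sketch of that encoding factored out (`encodeProof` is illustrative, not part of the diff):

```ts
import { concat } from "ethers/lib/utils";

// [count][accountProof nodes...][count][storageProof nodes...][extra]
// Any trailing bytes beyond the proofs trigger "Proof length mismatch".
function encodeProof(
  test: { accountProof: string[]; storageProof: string[] },
  extra = "0x"
): Uint8Array {
  return concat([
    `0x${test.accountProof.length.toString(16).padStart(2, "0")}`,
    ...test.accountProof,
    `0x${test.storageProof.length.toString(16).padStart(2, "0")}`,
    ...test.storageProof,
    extra,
  ]);
}
```

With this helper, every call site reduces to `verifier.verifyZkTrieProof(test.account, test.storage, encodeProof(test))`.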

File diff suppressed because it is too large

View File

@@ -2,7 +2,7 @@
import * as dotenv from "dotenv";
import { ethers } from "hardhat";
import { generateABI, createCode } from "../scripts/poseidon";
import poseidonUnit from "circomlib/src/poseidon_gencontract";
dotenv.config();
@@ -15,7 +15,11 @@ async function main() {
let PoseidonUnit2Address = process.env.POSEIDON_UNIT2_ADDR;
if (!PoseidonUnit2Address) {
const Poseidon2Elements = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
const Poseidon2Elements = new ethers.ContractFactory(
poseidonUnit.generateABI(2),
poseidonUnit.createCode(2),
deployer
);
const poseidon = await Poseidon2Elements.deploy();
console.log("Deploy PoseidonUnit2 contract, hash:", poseidon.deployTransaction.hash);
@@ -24,9 +28,7 @@ async function main() {
PoseidonUnit2Address = poseidon.address;
}
const verifier = await ScrollChainCommitmentVerifier.deploy(PoseidonUnit2Address, L1ScrollChainAddress, {
gasPrice: 1e9,
});
const verifier = await ScrollChainCommitmentVerifier.deploy(PoseidonUnit2Address, L1ScrollChainAddress);
console.log("Deploy ScrollChainCommitmentVerifier contract, hash:", verifier.deployTransaction.hash);
const receipt = await verifier.deployTransaction.wait();
console.log(`✅ Deploy ScrollChainCommitmentVerifier contract at: ${verifier.address}, gas used: ${receipt.gasUsed}`);
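The script now deploys circomlib's stock two-input Poseidon instead of the local domain-separated generator. A sketch of that path in isolation, assuming the same hardhat/circomlib setup the script imports:

```ts
import { ethers } from "hardhat";
import poseidonUnit from "circomlib/src/poseidon_gencontract";

// Deploys the assembly-generated Poseidon contract; there is no Solidity
// source, the ABI and bytecode come from circomlib at deploy time.
async function deployPoseidonUnit2(): Promise<string> {
  const [deployer] = await ethers.getSigners();
  const factory = new ethers.ContractFactory(
    poseidonUnit.generateABI(2),
    poseidonUnit.createCode(2),
    deployer
  );
  const poseidon = await factory.deploy();
  await poseidon.deployed();
  return poseidon.address; // reuse via POSEIDON_UNIT2_ADDR to skip this step
}
```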

View File

@@ -1,202 +0,0 @@
/* eslint-disable node/no-missing-import */
import { ethers } from "ethers";
import Contract from "circomlib/src/evmasm";
import * as constants from "circomlib/src/poseidon_constants";
const N_ROUNDS_F = 8;
const N_ROUNDS_P = [56, 57, 56, 60, 60, 63, 64, 63];
export function createCode(nInputs: number) {
if (nInputs < 1 || nInputs > 8) throw new Error("Invalid number of inputs. Must be 1<=nInputs<=8");
const t = nInputs + 1;
const nRoundsF = N_ROUNDS_F;
const nRoundsP = N_ROUNDS_P[t - 2];
const C = new Contract();
function saveM() {
for (let i = 0; i < t; i++) {
for (let j = 0; j < t; j++) {
C.push(constants.M[t - 2][i][j]);
C.push((1 + i * t + j) * 32);
C.mstore();
}
}
}
function ark(r: number) {
// st, q
for (let i = 0; i < t; i++) {
C.dup(t); // q, st, q
C.push(constants.C[t - 2][r * t + i]); // K, q, st, q
C.dup(2 + i); // st[i], K, q, st, q
C.addmod(); // newSt[i], st, q
C.swap(1 + i); // xx, st, q
C.pop();
}
}
function sigma(p: number) {
// sq, q
C.dup(t); // q, st, q
C.dup(1 + p); // st[p] , q , st, q
C.dup(1); // q, st[p] , q , st, q
C.dup(0); // q, q, st[p] , q , st, q
C.dup(2); // st[p] , q, q, st[p] , q , st, q
C.dup(0); // st[p] , st[p] , q, q, st[p] , q , st, q
C.mulmod(); // st2[p], q, st[p] , q , st, q
C.dup(0); // st2[p], st2[p], q, st[p] , q , st, q
C.mulmod(); // st4[p], st[p] , q , st, q
C.mulmod(); // st5[p], st, q
C.swap(1 + p);
C.pop(); // newst, q
}
function mix() {
C.label("mix");
for (let i = 0; i < t; i++) {
for (let j = 0; j < t; j++) {
if (j === 0) {
C.dup(i + t); // q, newSt, oldSt, q
C.push((1 + i * t + j) * 32);
C.mload(); // M, q, newSt, oldSt, q
C.dup(2 + i + j); // oldSt[j], M, q, newSt, oldSt, q
C.mulmod(); // acc, newSt, oldSt, q
} else {
C.dup(1 + i + t); // q, acc, newSt, oldSt, q
C.push((1 + i * t + j) * 32);
C.mload(); // M, q, acc, newSt, oldSt, q
C.dup(3 + i + j); // oldSt[j], M, q, acc, newSt, oldSt, q
C.mulmod(); // aux, acc, newSt, oldSt, q
C.dup(2 + i + t); // q, aux, acc, newSt, oldSt, q
C.swap(2); // acc, aux, q, newSt, oldSt, q
C.addmod(); // acc, newSt, oldSt, q
}
}
}
for (let i = 0; i < t; i++) {
C.swap(t - i + (t - i - 1));
C.pop();
}
C.push(0);
C.mload();
C.jmp();
}
// Check selector
C.push("0x0100000000000000000000000000000000000000000000000000000000");
C.push(0);
C.calldataload();
C.div();
C.dup(0);
C.push(ethers.utils.keccak256(ethers.utils.toUtf8Bytes(`poseidon(uint256[${nInputs}],uint256)`)).slice(0, 10)); // poseidon(uint256[n],uint256)
C.eq();
C.swap(1);
C.push(ethers.utils.keccak256(ethers.utils.toUtf8Bytes(`poseidon(bytes32[${nInputs}],bytes32)`)).slice(0, 10)); // poseidon(bytes32[n],bytes32)
C.eq();
C.or();
C.jmpi("start");
C.invalid();
C.label("start");
saveM();
C.push("0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"); // q
// Load t values from the call data.
// The function has a single array param
// [Selector (4)] [item1 (32)] [item2 (32)] .... [domain (32)]
// Stack positions 0-nInputs.
for (let i = 0; i < nInputs; i++) {
C.push(0x04 + 0x20 * (nInputs - i - 1));
C.calldataload();
}
C.push(0x04 + 0x20 * nInputs);
C.calldataload();
for (let i = 0; i < nRoundsF + nRoundsP; i++) {
ark(i);
if (i < nRoundsF / 2 || i >= nRoundsP + nRoundsF / 2) {
for (let j = 0; j < t; j++) {
sigma(j);
}
} else {
sigma(0);
}
const strLabel = "aferMix" + i;
C._pushLabel(strLabel);
C.push(0);
C.mstore();
C.jmp("mix");
C.label(strLabel);
}
C.push("0x00");
C.mstore(); // Save it to pos 0;
C.push("0x20");
C.push("0x00");
C.return();
mix();
return C.createTxData();
}
export function generateABI(nInputs: number) {
return [
{
constant: true,
inputs: [
{
internalType: `bytes32[${nInputs}]`,
name: "input",
type: `bytes32[${nInputs}]`,
},
{
internalType: "bytes32",
name: "domain",
type: "bytes32",
},
],
name: "poseidon",
outputs: [
{
internalType: "bytes32",
name: "",
type: "bytes32",
},
],
payable: false,
stateMutability: "pure",
type: "function",
},
{
constant: true,
inputs: [
{
internalType: `uint256[${nInputs}]`,
name: "input",
type: `uint256[${nInputs}]`,
},
{
internalType: "uint256",
name: "domain",
type: "uint256",
},
],
name: "poseidon",
outputs: [
{
internalType: "uint256",
name: "",
type: "uint256",
},
],
payable: false,
stateMutability: "pure",
type: "function",
},
];
}
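This deleted generator differs from circomlib's stock version in one way: every `poseidon` call carries an extra `domain` word (the `[domain (32)]` slot in the calldata layout above), loaded as the final state element. A sketch of calling the generated contract, assuming the old script were still on disk as `scripts/poseidon.ts`:

```ts
import { ethers } from "hardhat";
import { generateABI, createCode } from "../scripts/poseidon";

// Hashes two field elements under an explicit domain, e.g. 4 for leaf nodes
// or 1280 for account fields in the pre-change verifier.
async function poseidonWithDomain(a: string, b: string, domain: number) {
  const [deployer] = await ethers.getSigners();
  const factory = new ethers.ContractFactory(generateABI(2), createCode(2), deployer);
  const poseidon = await factory.deploy();
  await poseidon.deployed();
  const domain32 = ethers.utils.hexZeroPad(ethers.utils.hexlify(domain), 32);
  return poseidon["poseidon(bytes32[2],bytes32)"]([a, b], domain32);
}
```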

View File

@@ -2,7 +2,7 @@
pragma solidity =0.8.16;
import {IScrollChain} from "./IScrollChain.sol";
import {ScrollChain} from "./ScrollChain.sol";
import {ZkTrieVerifier} from "../../libraries/verifier/ZkTrieVerifier.sol";
contract ScrollChainCommitmentVerifier {
@@ -49,11 +49,11 @@ contract ScrollChainCommitmentVerifier {
bytes32 storageKey,
bytes calldata proof
) external view returns (bytes32 storageValue) {
require(IScrollChain(rollup).isBatchFinalized(batchIndex), "Batch not finalized");
require(ScrollChain(rollup).isBatchFinalized(batchIndex), "Batch not finalized");
bytes32 computedStateRoot;
(computedStateRoot, storageValue) = ZkTrieVerifier.verifyZkTrieProof(poseidon, account, storageKey, proof);
bytes32 expectedStateRoot = IScrollChain(rollup).finalizedStateRoots(batchIndex);
bytes32 expectedStateRoot = ScrollChain(rollup).finalizedStateRoots(batchIndex);
require(computedStateRoot == expectedStateRoot, "Invalid inclusion proof");
}
}
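For context, a hedged sketch of driving this contract from a script. The external entry point is assumed here to be `verifyStateCommitment(batchIndex, account, storageKey, proof)`; only its require lines appear in the hunk above, so treat the name and parameter order as illustrative:

```ts
import { ethers } from "hardhat";

// Proves a storage slot of an L2 account against a finalized batch root.
async function proveL2Storage(verifierAddr: string, batchIndex: number, proofBlob: string) {
  const verifier = await ethers.getContractAt("ScrollChainCommitmentVerifier", verifierAddr);
  // Reverts with "Batch not finalized" unless the batch has been finalized,
  // and with "Invalid inclusion proof" if the recomputed state root differs.
  return verifier.verifyStateCommitment(
    batchIndex,
    "0x226D078166C78e00ce5E97d8f18CDc408512bb0F", // L2 account (from the fixtures)
    "0x0000000000000000000000000000000000000000000000000000000000000001", // storage key
    proofBlob // [count][accountProof...][count][storageProof...]
  );
}
```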

View File

@@ -2,7 +2,9 @@
pragma solidity ^0.8.16;
// solhint-disable no-inline-assembly
interface PoseidonUnit2 {
function poseidon(uint256[2] memory) external view returns (uint256);
}
library ZkTrieVerifier {
/// @notice Internal function to validate a proof from eth_getProof.
@@ -56,20 +58,19 @@ library ZkTrieVerifier {
}
}
// compute poseidon hash of two uint256
function poseidon_hash(hasher, v0, v1, domain) -> r {
function poseidon_hash(hasher, v0, v1) -> r {
let x := mload(0x40)
// keccack256("poseidon(uint256[2],uint256)")
mstore(x, 0xa717016c00000000000000000000000000000000000000000000000000000000)
// keccack256("poseidon(uint256[2])")
mstore(x, 0x29a5f2f600000000000000000000000000000000000000000000000000000000)
mstore(add(x, 0x04), v0)
mstore(add(x, 0x24), v1)
mstore(add(x, 0x44), domain)
let success := staticcall(gas(), hasher, x, 0x64, 0x20, 0x20)
let success := staticcall(gas(), hasher, x, 0x44, 0x20, 0x20)
require(success, "poseidon hash failed")
r := mload(0x20)
}
// compute poseidon hash of 1 uint256
function hash_uint256(hasher, v) -> r {
r := poseidon_hash(hasher, shr(128, v), and(v, 0xffffffffffffffffffffffffffffffff), 512)
r := poseidon_hash(hasher, shr(128, v), and(v, 0xffffffffffffffffffffffffffffffff))
}
// traverses the tree from the root to the node before the leaf.
@@ -89,16 +90,15 @@ library ZkTrieVerifier {
} {
// must be a parent node with two children
let nodeType := byte(0, calldataload(ptr))
// 6 <= nodeType && nodeType < 10
require(lt(sub(nodeType, 6), 4), "InvalidBranchNodeType")
ptr := add(ptr, 1)
require(eq(nodeType, 0), "Invalid parent node")
// load left/right child hash
let childHashL := calldataload(ptr)
ptr := add(ptr, 0x20)
let childHashR := calldataload(ptr)
ptr := add(ptr, 0x20)
let hash := poseidon_hash(hasher, childHashL, childHashR, nodeType)
let hash := poseidon_hash(hasher, childHashL, childHashR)
// first item is considered the root node.
// Otherwise verifies that the hash of the current node
@@ -108,7 +108,7 @@ library ZkTrieVerifier {
rootHash := hash
}
default {
require(eq(hash, expectedHash), "BranchHashMismatch")
require(eq(hash, expectedHash), "Hash mismatch")
}
// decide which path to walk based on key
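The path decision elided here consumes one key bit per level. A sketch of the selection rule, under the assumption (consistent with zktrie layouts like this one) that the least-significant bit of the hashed key is used first:

```ts
// At each depth, bit 0 of the remaining key picks the child; the key is then
// shifted right so the next level consumes the next bit.
function nextChild(key: bigint, left: bigint, right: bigint): { hash: bigint; key: bigint } {
  const hash = (key & 1n) === 0n ? left : right;
  return { hash, key: key >> 1n };
}
```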
@@ -130,132 +130,129 @@ library ZkTrieVerifier {
x := keccak256(x, 0x2d)
require(
eq(x, 0x950654da67865a81bc70e45f3230f5179f08e29c66184bf746f71050f117b3b8),
"InvalidProofMagicBytes"
"Invalid ProofMagicBytes"
)
ptr := add(ptr, 0x2d) // skip ProofMagicBytes
}
function verifyAccountProof(hasher, _account, _ptr) -> ptr, storageRootHash, _stateRoot {
ptr := _ptr
let leafHash
let key := hash_uint256(hasher, shl(96, _account))
// `stateRoot` is a return value and must be checked by the caller
ptr, _stateRoot, leafHash := walkTree(hasher, key, ptr)
switch byte(0, calldataload(ptr))
case 4 {
// nonempty leaf node
ptr := add(ptr, 0x01) // skip NodeType
require(eq(calldataload(ptr), key), "AccountKeyMismatch")
ptr := add(ptr, 0x20) // skip NodeKey
require(eq(shr(224, calldataload(ptr)), 0x05080000), "InvalidAccountCompressedFlag")
ptr := add(ptr, 0x04) // skip CompressedFlag
// compute value hash for State Account Leaf Node, details can be found in
// https://github.com/scroll-tech/mpt-circuit/blob/v0.7/spec/mpt-proof.md#account-segmenttypes
// [nonce||codesize||0, balance, storage_root, keccak codehash, poseidon codehash]
mstore(0x00, calldataload(ptr))
ptr := add(ptr, 0x20) // skip nonce||codesize||0
mstore(0x00, poseidon_hash(hasher, mload(0x00), calldataload(ptr), 1280))
ptr := add(ptr, 0x20) // skip balance
storageRootHash := calldataload(ptr)
ptr := add(ptr, 0x20) // skip StorageRoot
let tmpHash := hash_uint256(hasher, calldataload(ptr))
ptr := add(ptr, 0x20) // skip KeccakCodeHash
tmpHash := poseidon_hash(hasher, storageRootHash, tmpHash, 1280)
tmpHash := poseidon_hash(hasher, mload(0x00), tmpHash, 1280)
tmpHash := poseidon_hash(hasher, tmpHash, calldataload(ptr), 1280)
ptr := add(ptr, 0x20) // skip PoseidonCodeHash
tmpHash := poseidon_hash(hasher, key, tmpHash, 4)
require(eq(leafHash, tmpHash), "InvalidAccountLeafNodeHash")
require(eq(0x20, byte(0, calldataload(ptr))), "InvalidAccountKeyPreimageLength")
ptr := add(ptr, 0x01) // skip KeyPreimage length
require(eq(shl(96, _account), calldataload(ptr)), "InvalidAccountKeyPreimage")
ptr := add(ptr, 0x20) // skip KeyPreimage
}
case 5 {
ptr := add(ptr, 0x01) // skip NodeType
}
default {
revertWith("InvalidAccountLeafNodeType")
}
// compare ProofMagicBytes
ptr := checkProofMagicBytes(hasher, ptr)
}
function verifyStorageProof(hasher, _storageKey, storageRootHash, _ptr) -> ptr, _storageValue {
ptr := _ptr
let leafHash
let key := hash_uint256(hasher, _storageKey)
let rootHash
ptr, rootHash, leafHash := walkTree(hasher, key, ptr)
// The root hash of the storage tree must match the value from the account leaf.
// But when the leaf node is the same as the root node, the function `walkTree` will return
// `rootHash=0` and `leafHash=0`. In such a case, we don't need to check the value of `rootHash`,
// and the value of `leafHash` should be the same as `storageRootHash`.
switch rootHash
case 0 {
leafHash := storageRootHash
}
default {
require(eq(rootHash, storageRootHash), "StorageRootMismatch")
}
switch byte(0, calldataload(ptr))
case 4 {
ptr := add(ptr, 0x01) // skip NodeType
require(eq(calldataload(ptr), key), "StorageKeyMismatch")
ptr := add(ptr, 0x20) // skip NodeKey
require(eq(shr(224, calldataload(ptr)), 0x01010000), "InvalidStorageCompressedFlag")
ptr := add(ptr, 0x04) // skip CompressedFlag
_storageValue := calldataload(ptr)
ptr := add(ptr, 0x20) // skip StorageValue
// compute leaf node hash and compare, details can be found in
// https://github.com/scroll-tech/mpt-circuit/blob/v0.7/spec/mpt-proof.md#storage-segmenttypes
mstore(0x00, hash_uint256(hasher, _storageValue))
mstore(0x00, poseidon_hash(hasher, key, mload(0x00), 4))
require(eq(leafHash, mload(0x00)), "InvalidStorageLeafNodeHash")
require(eq(0x20, byte(0, calldataload(ptr))), "InvalidStorageKeyPreimageLength")
ptr := add(ptr, 0x01) // skip KeyPreimage length
require(eq(_storageKey, calldataload(ptr)), "InvalidStorageKeyPreimage")
ptr := add(ptr, 0x20) // skip KeyPreimage
}
case 5 {
ptr := add(ptr, 0x01) // skip NodeType
require(eq(leafHash, 0), "InvalidStorageEmptyLeafNodeHash")
}
default {
revertWith("InvalidStorageLeafNodeType")
}
// compare ProofMagicBytes
ptr := checkProofMagicBytes(hasher, ptr)
}
let storageRootHash
// shared variable names
let storageHash
// starting point
let ptr := proof.offset
// check the correctness of account proof
ptr, storageRootHash, stateRoot := verifyAccountProof(poseidon, account, ptr)
// verify account proof
{
let leafHash
let key := hash_uint256(poseidon, shl(96, account))
// check the correctness of storage proof
ptr, storageValue := verifyStorageProof(poseidon, storageKey, storageRootHash, ptr)
// `stateRoot` is a return value and must be checked by the caller
ptr, stateRoot, leafHash := walkTree(poseidon, key, ptr)
require(eq(1, byte(0, calldataload(ptr))), "Invalid leaf node")
ptr := add(ptr, 0x01) // skip NodeType
require(eq(calldataload(ptr), key), "Node key mismatch")
ptr := add(ptr, 0x20) // skip NodeKey
{
let valuePreimageLength := and(shr(224, calldataload(ptr)), 0xffff)
// @todo check CompressedFlag
ptr := add(ptr, 0x04) // skip CompressedFlag
ptr := add(ptr, valuePreimageLength) // skip ValuePreimage
}
// compute value hash for State Account Leaf Node
{
let tmpHash1 := calldataload(ptr)
ptr := add(ptr, 0x20) // skip nonce/codesize/0
tmpHash1 := poseidon_hash(poseidon, tmpHash1, calldataload(ptr))
ptr := add(ptr, 0x20) // skip balance
storageHash := calldataload(ptr)
ptr := add(ptr, 0x20) // skip StorageRoot
let tmpHash2 := hash_uint256(poseidon, calldataload(ptr))
ptr := add(ptr, 0x20) // skip KeccakCodeHash
tmpHash2 := poseidon_hash(poseidon, storageHash, tmpHash2)
tmpHash2 := poseidon_hash(poseidon, tmpHash1, tmpHash2)
tmpHash2 := poseidon_hash(poseidon, tmpHash2, calldataload(ptr))
ptr := add(ptr, 0x20) // skip PoseidonCodeHash
tmpHash1 := poseidon_hash(poseidon, 1, key)
tmpHash1 := poseidon_hash(poseidon, tmpHash1, tmpHash2)
require(eq(leafHash, tmpHash1), "Invalid leaf node hash")
}
require(eq(0x20, byte(0, calldataload(ptr))), "Invalid KeyPreimage length")
ptr := add(ptr, 0x01) // skip KeyPreimage length
require(eq(shl(96, account), calldataload(ptr)), "Invalid KeyPreimage")
ptr := add(ptr, 0x20) // skip KeyPreimage
// compare ProofMagicBytes
ptr := checkProofMagicBytes(poseidon, ptr)
}
// verify storage proof
{
let leafHash
let key := hash_uint256(poseidon, storageKey)
{
let rootHash
ptr, rootHash, leafHash := walkTree(poseidon, key, ptr)
switch rootHash
case 0 {
// in the case that the leaf is the only element, then
// the hash of the leaf must match the value from the account leaf
require(eq(leafHash, storageHash), "Storage root mismatch")
}
default {
// otherwise the root hash of the storage tree
// must match the value from the account leaf
require(eq(rootHash, storageHash), "Storage root mismatch")
}
}
switch byte(0, calldataload(ptr))
case 1 {
ptr := add(ptr, 0x01) // skip NodeType
require(eq(calldataload(ptr), key), "Node key mismatch")
ptr := add(ptr, 0x20) // skip NodeKey
{
let valuePreimageLength := and(shr(224, calldataload(ptr)), 0xffff)
// @todo check CompressedFlag
ptr := add(ptr, 0x04) // skip CompressedFlag
ptr := add(ptr, valuePreimageLength) // skip ValuePreimage
}
storageValue := calldataload(ptr)
ptr := add(ptr, 0x20) // skip StorageValue
mstore(0x00, hash_uint256(poseidon, storageValue))
key := poseidon_hash(poseidon, 1, key)
mstore(0x00, poseidon_hash(poseidon, key, mload(0x00)))
require(eq(leafHash, mload(0x00)), "Invalid leaf node hash")
require(eq(0x20, byte(0, calldataload(ptr))), "Invalid KeyPreimage length")
ptr := add(ptr, 0x01) // skip KeyPreimage length
require(eq(storageKey, calldataload(ptr)), "Invalid KeyPreimage")
ptr := add(ptr, 0x20) // skip KeyPreimage
}
case 2 {
ptr := add(ptr, 0x01) // skip NodeType
require(eq(leafHash, 0), "Invalid empty node hash")
}
default {
revertWith("Invalid leaf node")
}
// compare ProofMagicBytes
ptr := checkProofMagicBytes(poseidon, ptr)
}
// the one and only boundary check
// in case an attacker crafts a malicious payload
// and succeeds in the prior verification steps
// then this should catch any bogus accesses
if iszero(eq(ptr, add(proof.offset, proof.length))) {
revertWith("ProofLengthMismatch")
revertWith("Proof length mismatch")
}
}
}
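One detail worth calling out from `hash_uint256` above: a full 256-bit word does not fit in the BN254 scalar field (the `q` constant pushed in the generator script), so the word is hashed as two 128-bit halves. A sketch of the same split, with `poseidon2` standing in for the on-chain hasher:

```ts
const MASK_128 = (1n << 128n) - 1n;

// Mirrors: r := poseidon_hash(hasher, shr(128, v), and(v, 0xffff...ffff))
function hashUint256(poseidon2: (hi: bigint, lo: bigint) => bigint, v: bigint): bigint {
  return poseidon2(v >> 128n, v & MASK_128);
}
```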

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 15, int(cur))
assert.Equal(t, 14, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -1,46 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE pending_transaction
(
id SERIAL PRIMARY KEY,
-- context info
context_id VARCHAR NOT NULL, -- batch hash in commit/finalize tx, block hash in update gas oracle tx
hash VARCHAR NOT NULL,
status SMALLINT NOT NULL,
rlp_encoding BYTEA NOT NULL,
-- debug info
chain_id BIGINT NOT NULL,
type SMALLINT NOT NULL,
gas_tip_cap BIGINT NOT NULL,
gas_fee_cap BIGINT NOT NULL, -- based on geth's implementation, it's gas price in legacy tx.
gas_limit BIGINT NOT NULL,
nonce BIGINT NOT NULL,
submit_block_number BIGINT NOT NULL,
-- sender info
sender_name VARCHAR NOT NULL,
sender_service VARCHAR NOT NULL,
sender_address VARCHAR NOT NULL,
sender_type SMALLINT NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX unique_idx_pending_transaction_on_hash ON pending_transaction(hash);
CREATE INDEX idx_pending_transaction_on_sender_type_status_nonce_gas_fee_cap ON pending_transaction (sender_type, status, nonce, gas_fee_cap);
CREATE INDEX idx_pending_transaction_on_sender_address_nonce ON pending_transaction(sender_address, nonce);
COMMENT ON COLUMN pending_transaction.sender_type IS 'unknown, commit batch, finalize batch, L1 gas oracle, L2 gas oracle';
COMMENT ON COLUMN pending_transaction.status IS 'unknown, pending, replaced, confirmed, confirmed failed';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS pending_transaction;
-- +goose StatementEnd

View File

@@ -488,7 +488,13 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -558,6 +564,7 @@ github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZn
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU=
@@ -850,7 +857,6 @@ github.com/onsi/ginkgo/v2 v2.8.1 h1:xFTEVwOFa1D/Ty24Ws1npBWkDYEV9BqZrsDxVrVkrrU=
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
@@ -974,12 +980,6 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
@@ -1141,7 +1141,6 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1155,11 +1154,9 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -1198,12 +1195,9 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1217,12 +1211,10 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1281,7 +1273,6 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=

View File

@@ -206,25 +206,11 @@ func (p *ProverCore) proveBatch(chunkInfosByt []byte, chunkProofsByt []byte) ([]
}
func (p *ProverCore) proveChunk(tracesByt []byte) ([]byte, error) {
tracesStr := C.CString(string(tracesByt))
defer C.free(unsafe.Pointer(tracesStr))
log.Info("Start to create chunk proof ...")
cProof := C.gen_chunk_proof(tracesStr)
defer C.free_c_chars(cProof)
C.gen_chunk_proof("")
//defer C.free_c_chars(cProof)
log.Info("Finish creating chunk proof!")
var result ProofResult
err := json.Unmarshal([]byte(C.GoString(cProof)), &result)
if err != nil {
return nil, fmt.Errorf("failed to parse chunk proof result: %v", err)
}
if result.Error != "" {
return nil, fmt.Errorf("failed to generate chunk proof: %s", result.Error)
}
return result.Message, nil
return nil, nil
}
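
Reading the hunk header (25 lines shrink to 11) together with the surviving added lines, the new proveChunk body appears to stub out trace handling and exercise only the FFI call; a reconstruction sketch, not verbatim (note a Go string literal cannot be passed to a cgo function, so an explicit C.CString is assumed):

func (p *ProverCore) proveChunk(tracesByt []byte) ([]byte, error) {
	log.Info("Start to create chunk proof ...")
	empty := C.CString("")
	defer C.free(unsafe.Pointer(empty))
	C.gen_chunk_proof(empty)
	// C.free_c_chars is intentionally skipped here, matching the commented-out
	// defer above: the branch exists to reproduce prover OOM behavior
	log.Info("Finish creating chunk proof!")
	return nil, nil
}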
func (p *ProverCore) mayDumpProof(id string, proofByt []byte) error {

View File

@@ -0,0 +1,61 @@
//go:build ffi
// go test -v -race -gcflags="-l" -ldflags="-s=false" -tags ffi ./...
package core
import (
"encoding/json"
"flag"
"github.com/stretchr/testify/assert"
"io/ioutil"
"net/http"
_ "net/http/pprof"
"testing"
"github.com/scroll-tech/go-ethereum/core/types"
"scroll-tech/common/types/message"
"scroll-tech/prover/config"
)
var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
tracePath1 = flag.String("trace1", "/assets/traces/1_transfer.json", "chunk trace 1")
)
func initPyroscope() {
go func() {
if runServerErr := http.ListenAndServe(":8089", nil); runServerErr != nil {
panic(runServerErr)
}
}()
}
func TestFFI(t *testing.T) {
ballast := make([]byte, 100*1024*1024*1024) // 100 GiB ballast to pin the GC heap goal
initPyroscope()
chunkProverConfig := &config.ProverCoreConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPath,
ProofType: message.ProofTypeChunk,
}
chunkProverCore, _ := NewProverCore(chunkProverConfig)
tracesByt, marshalErr := json.Marshal(readChunkTrace(t, *tracePath1))
assert.NoError(t, marshalErr)
for {
chunkProverCore.proveChunk(tracesByt)
}
runtime.KeepAlive(ballast)
}
func readChunkTrace(t *testing.T, filePath string) []*types.BlockTrace {
byt, err := ioutil.ReadFile(filePath)
assert.NoError(t, err)
trace := &types.BlockTrace{}
assert.NoError(t, json.Unmarshal(byt, trace))
return []*types.BlockTrace{trace}
}
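
A note on the ballast technique above: the slice is never written, so its pages stay virtual while the GC's heap goal scales with the 100 GiB of live bytes, effectively suppressing collection during the proving loop. runtime.KeepAlive guards the ballast from early collection, though placed after an unconditional for loop it is unreachable; a more conventional arrangement is sketched below (helper name is hypothetical):

// hypothetical helper: hold the ballast for the duration of the body
func runWithBallast(size int, body func()) {
	ballast := make([]byte, size) // untouched, so mostly virtual memory
	body()
	runtime.KeepAlive(ballast) // reachable here, unlike after an infinite loop
}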

View File

@@ -21,6 +21,8 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grafana/pyroscope-go v1.0.4 // indirect
github.com/grafana/pyroscope-go/godeltaprof v0.1.4 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
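
Both pyroscope modules arrive as indirect requirements here, so the direct import sits elsewhere; if wired up directly, the client's entrypoint would look roughly like this (hypothetical application name and server address):

import "github.com/grafana/pyroscope-go"

func startPyroscope() error {
	_, err := pyroscope.Start(pyroscope.Config{
		ApplicationName: "scroll.prover",         // assumed
		ServerAddress:   "http://localhost:4040", // assumed
	})
	return err
}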

View File

@@ -40,6 +40,10 @@ github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/pyroscope-go v1.0.4 h1:oyQX0BOkL+iARXzHuCdIF5TQ7/sRSel1YFViMHC7Bm0=
github.com/grafana/pyroscope-go v1.0.4/go.mod h1:0d7ftwSMBV/Awm7CCiYmHQEG8Y44Ma3YSjt+nWcWztY=
github.com/grafana/pyroscope-go/godeltaprof v0.1.4 h1:mDsJ3ngul7UfrHibGQpV66PbZ3q1T8glz/tK3bQKKEk=
github.com/grafana/pyroscope-go/godeltaprof v0.1.4/go.mod h1:1HSPtjU8vLG0jE9JrTdzjgFqdJ/VgN7fvxBNq3luJko=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=

View File

@@ -56,7 +56,7 @@ func action(ctx *cli.Context) error {
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
log.Crit("failed to close db connection", "error", err)
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -64,12 +64,14 @@ func action(ctx *cli.Context) error {
observability.Server(ctx, db)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
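
The recurring edit in these cmd entrypoints replaces log.Crit, which exits the process immediately and skips deferred cleanup, with log.Error plus an explicit error return, so the deferred database.CloseDB above still runs. The pattern in isolation:

client, err := ethclient.Dial(endpoint)
if err != nil {
	// log.Crit would os.Exit here, bypassing deferred cleanup;
	// logging and returning lets defers run and the caller decide
	log.Error("failed to connect geth", "error", err)
	return err
}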

View File

@@ -59,7 +59,7 @@ func action(ctx *cli.Context) error {
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
log.Crit("failed to close db connection", "error", err)
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -68,24 +68,28 @@ func action(ctx *cli.Context) error {
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
return err
}
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
if err != nil {
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
@@ -99,7 +103,6 @@ func action(ctx *cli.Context) error {
if loopErr = l1watcher.FetchBlockHeader(number - 1); loopErr != nil {
log.Error("Failed to fetch L1 block header", "lastest", number-1, "err", loopErr)
return
}
})

View File

@@ -59,7 +59,7 @@ func action(ctx *cli.Context) error {
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
log.Crit("failed to close db connection", "error", err)
log.Error("can not close ormFactory", "error", err)
}
}()
@@ -69,23 +69,27 @@ func action(ctx *cli.Context) error {
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
if err != nil {
log.Crit("failed to create chunkProposer", "config file", cfgFile, "error", err)
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
if err != nil {
log.Crit("failed to create batchProposer", "config file", cfgFile, "error", err)
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,

View File

@@ -9,13 +9,16 @@
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://rpc.scroll.io",
"check_pending_time": 2,
"check_balance_time": 100,
"escalate_blocks": 100,
"confirmations": "0x1",
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "LegacyTx",
"check_pending_time": 3
"min_balance": 100000000000000000000,
"pending_limit": 10
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -34,13 +37,16 @@
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://rpc.ankr.com/eth",
"check_pending_time": 10,
"check_balance_time": 100,
"escalate_blocks": 100,
"confirmations": "0x6",
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000,
"tx_type": "DynamicFeeTx",
"check_pending_time": 12
"min_balance": 100000000000000000000,
"pending_limit": 10
},
"gas_oracle_config": {
"min_gas_price": 0,

View File

@@ -6,6 +6,7 @@ require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20231130005111-38a3a9c9198c
github.com/smartystreets/goconvey v1.8.0

View File

@@ -156,6 +156,8 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=

View File

@@ -4,6 +4,7 @@ import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
@@ -28,6 +29,12 @@ type SenderConfig struct {
MaxGasPrice uint64 `json:"max_gas_price"`
// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
TxType string `json:"tx_type"`
// The minimum balance threshold used when checking and topping up the sender's accounts.
MinBalance *big.Int `json:"min_balance"`
// The interval (in seconds) to check balance and top up sender's accounts
CheckBalanceTime uint64 `json:"check_balance_time"`
// The sender's pending count limit.
PendingLimit int `json:"pending_limit"`
}
// ChainMonitor this config is used to get batch status from chain_monitor API.
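
Because MinBalance is a *big.Int, the 100000000000000000000 (100 ether in wei) seen in the config diff decodes losslessly even though it exceeds uint64; a minimal decoding sketch using only the fields shown:

raw := []byte(`{"min_balance": 100000000000000000000, "check_balance_time": 100, "pending_limit": 10}`)
var sc SenderConfig
if err := json.Unmarshal(raw, &sc); err != nil {
	panic(err) // illustration only
}
fmt.Println(sc.MinBalance.String()) // 100000000000000000000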

View File

@@ -2,6 +2,7 @@ package relayer
import (
"context"
"errors"
"fmt"
"math/big"
@@ -43,7 +44,7 @@ type Layer1Relayer struct {
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", types.SenderTypeL1GasOracle, db, reg)
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
@@ -117,7 +118,9 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
}
return
}
@@ -133,38 +136,28 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
}
}
func (r *Layer1Relayer) handleConfirmation(cfm *sender.Confirmation) {
switch cfm.SenderType {
case types.SenderTypeL1GasOracle:
var status types.GasOracleStatus
if cfm.IsSuccessful {
status = types.GasOracleImported
r.metrics.rollupL1UpdateGasOracleConfirmedTotal.Inc()
log.Info("UpdateGasOracleTxType transaction confirmed in layer2", "confirmation", cfm)
} else {
status = types.GasOracleImportedFailed
r.metrics.rollupL1UpdateGasOracleConfirmedFailedTotal.Inc()
log.Warn("UpdateGasOracleTxType transaction confirmed but failed in layer2", "confirmation", cfm)
}
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ContextID, status, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "confirmation", cfm, "err", err)
}
default:
log.Warn("Unknown transaction type", "confirmation", cfm)
}
log.Info("Transaction confirmed in layer2", "confirmation", cfm)
}
func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.gasOracleSender.ConfirmChan():
r.handleConfirmation(cfm)
r.metrics.rollupL1GasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL1GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
} else {
// @todo handle db error
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateGasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
}
}
}
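
The suppression above uses errors.Is rather than == so that a sentinel wrapped somewhere inside the sender still matches; a minimal sketch:

err := fmt.Errorf("send failed: %w", sender.ErrFullPending)
if errors.Is(err, sender.ErrFullPending) {
	// expected backpressure, not a failure: skip the error log and retry next tick
}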

View File

@@ -8,10 +8,12 @@ import (
)
type l1RelayerMetrics struct {
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLastGasPrice prometheus.Gauge
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL1RelayedMsgsTotal prometheus.Counter
rollupL1RelayedMsgsFailureTotal prometheus.Counter
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLastGasPrice prometheus.Gauge
rollupL1MsgsRelayedConfirmedTotal prometheus.Counter
rollupL1GasOraclerConfirmedTotal prometheus.Counter
}
var (
@@ -22,6 +24,18 @@ var (
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
initL1RelayerMetricOnce.Do(func() {
l1RelayerMetric = &l1RelayerMetrics{
rollupL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_msg_relayed_total",
Help: "The total number of the l1 relayed message.",
}),
rollupL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_msg_relayed_failure_total",
Help: "The total number of the l1 relayed failure message.",
}),
rollupL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_relayed_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
rollupL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_gas_price_oracler_total",
Help: "The total number of layer1 gas price oracler run total",
@@ -30,13 +44,9 @@ func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
Name: "rollup_layer1_gas_price_latest_gas_price",
Help: "The latest gas price of rollup relayer l1",
}),
rollupL1UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_update_gas_oracle_confirmed_total",
Help: "The total number of updating layer1 gas oracle confirmed",
}),
rollupL1UpdateGasOracleConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_update_gas_oracle_confirmed_failed_total",
Help: "The total number of updating layer1 gas oracle confirmed failed",
rollupL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_gas_oracler_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
}
})

View File

@@ -61,14 +61,12 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
// Simulate message confirmations.
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ContextID: "gas-oracle-1",
ID: "gas-oracle-1",
IsSuccessful: true,
SenderType: types.SenderTypeL1GasOracle,
})
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ContextID: "gas-oracle-2",
ID: "gas-oracle-2",
IsSuccessful: false,
SenderType: types.SenderTypeL1GasOracle,
})
// Check the database for the updated status using TryTimes.
@@ -76,7 +74,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"})
msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"})
return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleImportedFailed
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleFailed
})
assert.True(t, ok)
}

View File

@@ -2,9 +2,11 @@ package relayer
import (
"context"
"errors"
"fmt"
"math/big"
"sort"
"sync"
"time"
"github.com/go-resty/resty/v2"
@@ -58,23 +60,31 @@ type Layer2Relayer struct {
// Used to get batch status from chain_monitor api.
chainMonitorClient *resty.Client
// A list of processing batches commitment.
// key(string): confirmation ID, value(string): batch hash.
processingCommitment sync.Map
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
metrics *l2RelayerMetrics
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", types.SenderTypeCommitBatch, db, reg)
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
}
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", types.SenderTypeFinalizeBatch, db, reg)
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", types.SenderTypeL2GasOracle, db, reg)
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
@@ -115,7 +125,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
cfg: cfg,
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
}
// chain_monitor client
@@ -241,8 +253,8 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
// handle confirmation
case confirmation := <-r.commitSender.ConfirmChan():
if confirmation.ContextID != batchHash {
return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ContextID)
if confirmation.ID != batchHash {
return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
}
if !confirmation.IsSuccessful {
return fmt.Errorf("import genesis batch tx failed")
@@ -281,7 +293,9 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, big.NewInt(0), data, 0)
if err != nil {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
}
return
}
@@ -368,13 +382,14 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
// send transaction
txID := batch.Hash + "-commit"
fallbackGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
if types.RollupStatus(batch.RollupStatus) == types.RollupCommitFailed {
// use eth_estimateGas if this batch has been committed failed.
fallbackGasLimit = 0
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", batch.Hash)
}
txHash, err := r.commitSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, fallbackGasLimit)
txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, fallbackGasLimit)
if err != nil {
log.Error(
"Failed to send commitBatch tx to layer1",
@@ -400,6 +415,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
r.processingCommitment.Store(txID, batch.Hash)
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
}
@@ -538,27 +554,34 @@ func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
}
}
txID := batch.Hash + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.finalizeSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, big.NewInt(0), txCalldata, 0)
txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), txCalldata, 0)
finalizeTxHash := &txHash
if err != nil {
log.Error(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(txCalldata),
"err", err,
)
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
// This can happen normally if we try to finalize 2 or more
// batches around the same time. The 2nd tx might fail since
// the client does not see the 1st tx's updates at this point.
// TODO: add more fine-grained error handling
log.Error(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(txCalldata),
"err", err,
)
}
return err
}
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", batch.Index, "batch hash", batch.Hash, "tx hash", batch.Hash)
@@ -568,6 +591,7 @@ func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "err", err)
return err
}
r.processingFinalization.Store(txID, batch.Hash)
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
return nil
}
@@ -618,59 +642,53 @@ func (r *Layer2Relayer) getBatchStatusByIndex(batch *orm.Batch) (bool, error) {
return response.Data, nil
}
func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
switch cfm.SenderType {
case types.SenderTypeCommitBatch:
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is CommitBatches transaction
if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchesCommitment"
var status types.RollupStatus
if cfm.IsSuccessful {
if confirmation.IsSuccessful {
status = types.RollupCommitted
r.metrics.rollupL2BatchesCommittedConfirmedTotal.Inc()
} else {
status = types.RollupCommitFailed
r.metrics.rollupL2BatchesCommittedConfirmedFailedTotal.Inc()
log.Warn("CommitBatchTxType transaction confirmed but failed in layer1", "confirmation", cfm)
log.Warn("commitBatch transaction confirmed but failed in layer1", "confirmation", confirmation)
}
err := r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, cfm.ContextID, cfm.TxHash.String(), status)
// @todo handle db error
err := r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
log.Warn("UpdateCommitTxHashAndRollupStatus failed",
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
case types.SenderTypeFinalizeBatch:
r.metrics.rollupL2BatchesCommittedConfirmedTotal.Inc()
r.processingCommitment.Delete(confirmation.ID)
}
// check whether it is proof finalization transaction
if batchHash, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
var status types.RollupStatus
if cfm.IsSuccessful {
if confirmation.IsSuccessful {
status = types.RollupFinalized
r.metrics.rollupL2BatchesFinalizedConfirmedTotal.Inc()
} else {
status = types.RollupFinalizeFailed
r.metrics.rollupL2BatchesFinalizedConfirmedFailedTotal.Inc()
log.Warn("FinalizeBatchTxType transaction confirmed but failed in layer1", "confirmation", cfm)
log.Warn("finalizeBatchWithProof transaction confirmed but failed in layer1", "confirmation", confirmation)
}
err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, cfm.ContextID, cfm.TxHash.String(), status)
// @todo handle db error
err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
case types.SenderTypeL2GasOracle:
batchHash := cfm.ContextID
var status types.GasOracleStatus
if cfm.IsSuccessful {
status = types.GasOracleImported
r.metrics.rollupL2UpdateGasOracleConfirmedTotal.Inc()
} else {
status = types.GasOracleImportedFailed
r.metrics.rollupL2UpdateGasOracleConfirmedFailedTotal.Inc()
log.Warn("UpdateGasOracleTxType transaction confirmed but failed in layer1", "confirmation", cfm)
}
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batchHash, status, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "confirmation", cfm, "err", err)
}
default:
log.Warn("Unknown transaction type", "confirmation", cfm)
r.metrics.rollupL2BatchesFinalizedConfirmedTotal.Inc()
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("Transaction confirmed in layer1", "confirmation", cfm)
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
@@ -678,12 +696,27 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
select {
case <-ctx.Done():
return
case cfm := <-r.commitSender.ConfirmChan():
r.handleConfirmation(cfm)
case cfm := <-r.finalizeSender.ConfirmChan():
r.handleConfirmation(cfm)
case confirmation := <-r.commitSender.ConfirmChan():
r.handleConfirmation(confirmation)
case confirmation := <-r.finalizeSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
r.handleConfirmation(cfm)
r.metrics.rollupL2BatchesGasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Warn("transaction confirmed but failed in layer1", "confirmation", cfm)
} else {
// @todo handle db error
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleImported, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "err", err)
}
log.Info("transaction confirmed in layer1", "confirmation", cfm)
}
}
}
}
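
Condensed, the new confirmation routing replaces the SenderType switch with two sync.Map lookups keyed by confirmation ID, where the "-commit" and "-finalize" suffixes keep one batch's two transactions distinct; the lifecycle in a sketch:

txID := batch.Hash + "-commit" // never collides with batch.Hash + "-finalize"
r.processingCommitment.Store(txID, batch.Hash)

// later, in handleConfirmation:
if batchHash, ok := r.processingCommitment.Load(confirmation.ID); ok {
	status := types.RollupCommitted
	if !confirmation.IsSuccessful {
		status = types.RollupCommitFailed
	}
	_ = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batchHash.(string), confirmation.TxHash.String(), status)
	r.processingCommitment.Delete(confirmation.ID)
}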

View File

@@ -19,8 +19,7 @@ type l2RelayerMetrics struct {
rollupL2BatchesCommittedConfirmedFailedTotal prometheus.Counter
rollupL2BatchesFinalizedConfirmedTotal prometheus.Counter
rollupL2BatchesFinalizedConfirmedFailedTotal prometheus.Counter
rollupL2UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL2UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL2BatchesGasOraclerConfirmedTotal prometheus.Counter
rollupL2ChainMonitorLatestFailedCall prometheus.Counter
rollupL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
}
@@ -77,13 +76,9 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "rollup_layer2_process_finalized_batches_confirmed_failed_total",
Help: "The total number of layer2 process finalized batches confirmed failed total",
}),
rollupL2UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_total",
Help: "The total number of updating layer2 gas oracle confirmed",
}),
rollupL2UpdateGasOracleConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_failed_total",
Help: "The total number of updating layer2 gas oracle confirmed failed",
rollupL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_gras_oracler_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
rollupL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_chain_monitor_latest_failed_batch_call",

View File

@@ -164,9 +164,11 @@ func testL2RelayerCommitConfirm(t *testing.T) {
assert.NoError(t, err)
// Simulate message confirmations.
processingKeys := []string{"committed-1", "committed-2"}
isSuccessful := []bool{true, false}
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(isSuccessful))
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
@@ -179,12 +181,12 @@ func testL2RelayerCommitConfirm(t *testing.T) {
batchHashes[i] = batch.Hash
}
for i, batchHash := range batchHashes {
for i, key := range processingKeys {
l2Relayer.processingCommitment.Store(key, batchHashes[i])
l2Relayer.commitSender.SendConfirmation(&sender.Confirmation{
ContextID: batchHash,
ID: key,
IsSuccessful: isSuccessful[i],
TxHash: common.HexToHash("0x123456789abcdef"),
SenderType: types.SenderTypeCommitBatch,
})
}
@@ -218,9 +220,11 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
assert.NoError(t, err)
// Simulate message confirmations.
processingKeys := []string{"finalized-1", "finalized-2"}
isSuccessful := []bool{true, false}
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(isSuccessful))
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
@@ -233,12 +237,12 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
batchHashes[i] = batch.Hash
}
for i, batchHash := range batchHashes {
for i, key := range processingKeys {
l2Relayer.processingFinalization.Store(key, batchHashes[i])
l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{
ContextID: batchHash,
ID: key,
IsSuccessful: isSuccessful[i],
TxHash: common.HexToHash("0x123456789abcdef"),
SenderType: types.SenderTypeFinalizeBatch,
})
}
@@ -303,14 +307,13 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
for _, confirmation := range confirmations {
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ContextID: confirmation.batchHash,
ID: confirmation.batchHash,
IsSuccessful: confirmation.isSuccessful,
SenderType: types.SenderTypeL2GasOracle,
})
}
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleImportedFailed}
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleFailed}
for i, confirmation := range confirmations {
gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0)
if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
@@ -375,13 +378,13 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
convey.Convey("Failed to send setL2BaseFee tx to layer2", t, func() {
targetErr := errors.New("failed to send setL2BaseFee tx to layer2 error")
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
return common.Hash{}, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (hash common.Hash, err error) {
return common.HexToHash("0x56789abcdef1234"), nil
})

View File

@@ -3,6 +3,7 @@ package sender
import (
"fmt"
"math/big"
"sync/atomic"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/common"
@@ -10,16 +11,16 @@ import (
"github.com/scroll-tech/go-ethereum/log"
)
func (s *Sender) estimateLegacyGas(to *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
func (s *Sender) estimateLegacyGas(contract *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
if err != nil {
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
return nil, err
}
gasLimit, _, err := s.estimateGasLimit(to, data, gasPrice, nil, nil, value, false)
gasLimit, _, err := s.estimateGasLimit(contract, data, gasPrice, nil, nil, value, false)
if err != nil {
log.Error("estimateLegacyGas estimateGasLimit failure", "gas price", gasPrice, "from", s.auth.From.String(),
"nonce", s.auth.Nonce.Uint64(), "to address", to.String(), "fallback gas limit", fallbackGasLimit, "error", err)
log.Error("estimateLegacyGas estimateGasLimit failure", "gas price", gasPrice, "from", s.auth.From.Hex(),
"nonce", s.auth.Nonce.Uint64(), "contract address", contract.Hex(), "fallback gas limit", fallbackGasLimit, "error", err)
if fallbackGasLimit == 0 {
return nil, err
}
@@ -33,18 +34,25 @@ func (s *Sender) estimateLegacyGas(to *common.Address, value *big.Int, data []by
}, nil
}
func (s *Sender) estimateDynamicGas(to *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64, baseFee uint64) (*FeeData, error) {
func (s *Sender) estimateDynamicGas(contract *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
if err != nil {
log.Error("estimateDynamicGas SuggestGasTipCap failure", "error", err)
return nil, err
}
gasFeeCap := new(big.Int).Add(gasTipCap, new(big.Int).Mul(new(big.Int).SetUint64(baseFee), big.NewInt(2)))
gasLimit, accessList, err := s.estimateGasLimit(to, data, nil, gasTipCap, gasFeeCap, value, true)
baseFee := big.NewInt(0)
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
baseFee.SetUint64(feeGas)
}
gasFeeCap := new(big.Int).Add(
gasTipCap,
new(big.Int).Mul(baseFee, big.NewInt(2)),
)
gasLimit, accessList, err := s.estimateGasLimit(contract, data, nil, gasTipCap, gasFeeCap, value, true)
if err != nil {
log.Error("estimateDynamicGas estimateGasLimit failure",
"from", s.auth.From.String(), "nonce", s.auth.Nonce.Uint64(), "to address", to.String(),
"from", s.auth.From.Hex(), "nonce", s.auth.Nonce.Uint64(), "contract address", contract.Hex(),
"fallback gas limit", fallbackGasLimit, "error", err)
if fallbackGasLimit == 0 {
return nil, err
@@ -64,10 +72,10 @@ func (s *Sender) estimateDynamicGas(to *common.Address, value *big.Int, data []b
return feeData, nil
}
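
Doubling the base fee in the cap gives the transaction roughly six blocks of headroom: EIP-1559 raises the base fee by at most 12.5% per block, and 1.125^6 ≈ 2.03. A worked example with assumed values:

baseFee := big.NewInt(30_000_000_000)  // 30 gwei, assumed
gasTipCap := big.NewInt(2_000_000_000) // 2 gwei, assumed
gasFeeCap := new(big.Int).Add(gasTipCap, new(big.Int).Mul(baseFee, big.NewInt(2)))
// gasFeeCap = 62 gwei: still includable after ~6 consecutive maximal base-fee bumps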
func (s *Sender) estimateGasLimit(to *common.Address, data []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int, useAccessList bool) (uint64, *types.AccessList, error) {
func (s *Sender) estimateGasLimit(contract *common.Address, data []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int, useAccessList bool) (uint64, *types.AccessList, error) {
msg := ethereum.CallMsg{
From: s.auth.From,
To: to,
To: contract,
GasPrice: gasPrice,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,

View File

@@ -0,0 +1,95 @@
package sender
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type senderMetrics struct {
senderCheckBalancerTotal *prometheus.CounterVec
senderCheckPendingTransactionTotal *prometheus.CounterVec
sendTransactionTotal *prometheus.CounterVec
sendTransactionFailureFullTx *prometheus.GaugeVec
sendTransactionFailureRepeatTransaction *prometheus.CounterVec
sendTransactionFailureGetFee *prometheus.CounterVec
sendTransactionFailureSendTx *prometheus.CounterVec
resubmitTransactionTotal *prometheus.CounterVec
currentPendingTxsNum *prometheus.GaugeVec
currentGasFeeCap *prometheus.GaugeVec
currentGasTipCap *prometheus.GaugeVec
currentGasPrice *prometheus.GaugeVec
currentGasLimit *prometheus.GaugeVec
currentNonce *prometheus.GaugeVec
}
var (
initSenderMetricOnce sync.Once
sm *senderMetrics
)
func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
initSenderMetricOnce.Do(func() {
sm = &senderMetrics{
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_total",
Help: "The total number of sending transaction.",
}, []string{"service", "name"}),
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_send_transaction_full_tx_failure_total",
Help: "The total number of sending transaction failure for full size tx.",
}, []string{"service", "name"}),
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_repeat_transaction_failure_total",
Help: "The total number of sending transaction failure for repeat transaction.",
}, []string{"service", "name"}),
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_get_fee_failure_total",
Help: "The total number of sending transaction failure for getting fee.",
}, []string{"service", "name"}),
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_send_tx_failure_total",
Help: "The total number of sending transaction failure for sending tx.",
}, []string{"service", "name"}),
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_resubmit_send_transaction_total",
Help: "The total number of resubmit transaction.",
}, []string{"service", "name"}),
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_pending_tx_count",
Help: "The pending tx count in the sender.",
}, []string{"service", "name"}),
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_fee_cap",
Help: "The gas fee of current transaction.",
}, []string{"service", "name"}),
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_tip_cap",
Help: "The gas tip of current transaction.",
}, []string{"service", "name"}),
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_price_cap",
Help: "The gas price of current transaction.",
}, []string{"service", "name"}),
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_limit",
Help: "The gas limit of current transaction.",
}, []string{"service", "name"}),
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_nonce",
Help: "The nonce of current transaction.",
}, []string{"service", "name"}),
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_check_pending_transaction_total",
Help: "The total number of check pending transaction.",
}, []string{"service", "name"}),
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_check_balancer_total",
Help: "The total number of check balancer.",
}, []string{"service", "name"}),
}
})
return sm
}
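
Since initSenderMetrics is guarded by sync.Once, every sender shares one set of vectors and distinguishes itself purely by the service/name labels; a usage sketch against an isolated registry:

reg := prometheus.NewRegistry()
m := initSenderMetrics(reg)
m.sendTransactionTotal.WithLabelValues("l2_relayer", "commit_sender").Inc()
m.currentPendingTxsNum.WithLabelValues("l2_relayer", "commit_sender").Set(3)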

View File

@@ -1,30 +1,26 @@
package sender
import (
"bytes"
"context"
"crypto/ecdsa"
"errors"
"fmt"
"math/big"
"strings"
"sync/atomic"
"time"
cmapV2 "github.com/orcaman/concurrent-map/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/ethclient/gethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rlp"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
@@ -39,12 +35,18 @@ const (
LegacyTxType = "LegacyTx"
)
var (
// ErrNoAvailableAccount indicates no available account error in the account pool.
ErrNoAvailableAccount = errors.New("sender has no available account to send transaction")
// ErrFullPending sender's pending pool is full.
ErrFullPending = errors.New("sender's pending pool is full")
)
// Confirmation struct used to indicate transaction confirmation details
type Confirmation struct {
ContextID string
ID string
IsSuccessful bool
TxHash common.Hash
SenderType types.SenderType
}
// FeeData fee struct used to estimate gas price
@@ -53,11 +55,20 @@ type FeeData struct {
gasTipCap *big.Int
gasPrice *big.Int
accessList gethTypes.AccessList
accessList types.AccessList
gasLimit uint64
}
// PendingTransaction submitted but pending transactions
type PendingTransaction struct {
submitAt uint64
id string
feeData *FeeData
signer *bind.TransactOpts
tx *types.Transaction
}
// Sender Transaction sender to send transaction to l1/l2 geth
type Sender struct {
config *config.SenderConfig
@@ -67,25 +78,23 @@ type Sender struct {
ctx context.Context
service string
name string
senderType types.SenderType
auth *bind.TransactOpts
auth *bind.TransactOpts
minBalance *big.Int
db *gorm.DB
pendingTransactionOrm *orm.PendingTransaction
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
pendingTxs cmapV2.ConcurrentMap[string, *PendingTransaction] // Mapping from nonce to pending transaction
confirmCh chan *Confirmation
confirmCh chan *Confirmation
stopCh chan struct{}
stopCh chan struct{}
metrics *senderMetrics
}
// NewSender returns a new instance of transaction sender
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, senderType types.SenderType, db *gorm.DB, reg prometheus.Registerer) (*Sender, error) {
if config.EscalateMultipleNum <= config.EscalateMultipleDen {
return nil, fmt.Errorf("invalid params, EscalateMultipleNum; %v, EscalateMultipleDen: %v", config.EscalateMultipleNum, config.EscalateMultipleDen)
}
// txConfirmationCh is used to notify confirmed transaction
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, reg prometheus.Registerer) (*Sender, error) {
rpcClient, err := rpc.Dial(config.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
@@ -109,20 +118,36 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
}
auth.Nonce = big.NewInt(int64(nonce))
// get header by number
header, err := client.HeaderByNumber(ctx, nil)
if err != nil {
return nil, fmt.Errorf("failed to get header by number, err: %w", err)
}
var baseFeePerGas uint64
if config.TxType == DynamicFeeTxType {
if header.BaseFee != nil {
baseFeePerGas = header.BaseFee.Uint64()
} else {
return nil, errors.New("dynamic fee tx type not supported: header.BaseFee is nil")
}
}
sender := &Sender{
ctx: ctx,
config: config,
gethClient: gethclient.New(rpcClient),
client: client,
chainID: chainID,
auth: auth,
db: db,
pendingTransactionOrm: orm.NewPendingTransaction(db),
confirmCh: make(chan *Confirmation, 128),
stopCh: make(chan struct{}),
name: name,
service: service,
senderType: senderType,
ctx: ctx,
config: config,
gethClient: gethclient.New(rpcClient),
client: client,
chainID: chainID,
auth: auth,
minBalance: config.MinBalance,
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: baseFeePerGas,
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
name: name,
service: service,
}
sender.metrics = initSenderMetrics(reg)
@@ -131,6 +156,21 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
return sender, nil
}
// PendingCount returns the current number of pending txs.
func (s *Sender) PendingCount() int {
return s.pendingTxs.Count()
}
// PendingLimit returns the maximum number of pending txs the sender can handle.
func (s *Sender) PendingLimit() int {
return s.config.PendingLimit
}
// IsFull returns true if the sender's pending tx pool is full.
func (s *Sender) IsFull() bool {
return s.pendingTxs.Count() >= s.config.PendingLimit
}
// GetChainID returns the chain ID associated with the sender.
func (s *Sender) GetChainID() *big.Int {
return s.chainID
@@ -139,7 +179,7 @@ func (s *Sender) GetChainID() *big.Int {
// Stop stop the sender module.
func (s *Sender) Stop() {
close(s.stopCh)
log.Info("sender stopped", "name", s.name, "service", s.service, "address", s.auth.From.String())
log.Info("Transaction sender stopped")
}
// ConfirmChan channel used to communicate with transaction sender
@@ -153,29 +193,40 @@ func (s *Sender) SendConfirmation(cfm *Confirmation) {
s.confirmCh <- cfm
}
func (s *Sender) getFeeData(target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64, baseFee uint64) (*FeeData, error) {
func (s *Sender) getFeeData(target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
if s.config.TxType == DynamicFeeTxType {
return s.estimateDynamicGas(target, value, data, fallbackGasLimit, baseFee)
return s.estimateDynamicGas(target, value, data, fallbackGasLimit)
}
return s.estimateLegacyGas(target, value, data, fallbackGasLimit)
}
// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(contextID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (common.Hash, error) {
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, fallbackGasLimit uint64) (common.Hash, error) {
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
if s.IsFull() {
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
return common.Hash{}, ErrFullPending
}
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(0)
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
s.metrics.sendTransactionFailureRepeatTransaction.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
}
var (
feeData *FeeData
tx *gethTypes.Transaction
tx *types.Transaction
err error
)
blockNumber, baseFee, err := s.getBlockNumberAndBaseFee(s.ctx)
if err != nil {
log.Error("failed to get block number and base fee", "error", err)
return common.Hash{}, fmt.Errorf("failed to get block number and base fee, err: %w", err)
}
defer func() {
if err != nil {
s.pendingTxs.Remove(ID) // release the ID on failure
}
}()
if feeData, err = s.getFeeData(target, value, data, fallbackGasLimit, baseFee); err != nil {
if feeData, err = s.getFeeData(target, value, data, fallbackGasLimit); err != nil {
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to get fee data", "from", s.auth.From.String(), "nonce", s.auth.Nonce.Uint64(), "fallback gas limit", fallbackGasLimit, "err", err)
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
@@ -187,17 +238,22 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, value
return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
}
if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), tx, blockNumber); err != nil {
log.Error("failed to insert transaction", "from", s.auth.From.String(), "nonce", s.auth.Nonce.Uint64(), "err", err)
return common.Hash{}, fmt.Errorf("failed to insert transaction, err: %w", err)
// add pending transaction
pending := &PendingTransaction{
tx: tx,
id: ID,
signer: s.auth,
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
s.pendingTxs.Set(ID, pending)
return tx.Hash(), nil
}
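The SetIfAbsent/Set/Remove sequence above is a reserve-then-fill pattern: the ID is first claimed with a nil placeholder so a concurrent duplicate fails fast, then either filled with the real entry or released in the deferred cleanup. A standalone sketch, assuming cmapV2 is github.com/orcaman/concurrent-map/v2 (the import itself is not shown in this diff):

    // reserveID claims a transaction ID with a nil placeholder; exactly one
    // concurrent caller wins, the rest get a "repeat transaction ID" error.
    // The winner must later call pending.Set(id, entry) or pending.Remove(id).
    func reserveID(pending cmapV2.ConcurrentMap[string, *PendingTransaction], id string) error {
        if ok := pending.SetIfAbsent(id, nil); !ok {
            return fmt.Errorf("repeat transaction ID: %s", id)
        }
        return nil
    }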
func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (*gethTypes.Transaction, error) {
func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (*types.Transaction, error) {
var (
nonce = s.auth.Nonce.Uint64()
txData gethTypes.TxData
txData types.TxData
)
// this is a resubmit call, override the nonce
@@ -205,10 +261,11 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value
nonce = *overrideNonce
}
// lock here to avoid blocking when calling `SuggestGasPrice`
switch s.config.TxType {
case LegacyTxType:
// for ganache mock node
txData = &gethTypes.LegacyTx{
txData = &types.LegacyTx{
Nonce: nonce,
GasPrice: feeData.gasPrice,
Gas: feeData.gasLimit,
@@ -220,7 +277,7 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value
S: new(big.Int),
}
case AccessListTxType:
txData = &gethTypes.AccessListTx{
txData = &types.AccessListTx{
ChainID: s.chainID,
Nonce: nonce,
GasPrice: feeData.gasPrice,
@@ -234,7 +291,7 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value
S: new(big.Int),
}
default:
txData = &gethTypes.DynamicFeeTx{
txData = &types.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: common.CopyBytes(data),
@@ -251,12 +308,11 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value
}
// sign and send
tx, err := s.auth.Signer(s.auth.From, gethTypes.NewTx(txData))
tx, err := s.auth.Signer(s.auth.From, types.NewTx(txData))
if err != nil {
log.Error("failed to sign tx", "address", s.auth.From.String(), "err", err)
return nil, err
}
if err = s.client.SendTransaction(s.ctx, tx); err != nil {
log.Error("failed to send tx", "tx hash", tx.Hash().String(), "from", s.auth.From.String(), "nonce", tx.Nonce(), "err", err)
// Check if the error message contains "nonce", and reset the nonce if so
@@ -288,7 +344,7 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, value
return tx, nil
}
// resetNonce resets the nonce if sending a signed tx failed.
func (s *Sender) resetNonce(ctx context.Context) {
nonce, err := s.client.PendingNonceAt(ctx, s.auth.From)
if err != nil {
@@ -298,7 +354,7 @@ func (s *Sender) resetNonce(ctx context.Context) {
s.auth.Nonce = big.NewInt(int64(nonce))
}
func (s *Sender) resubmitTransaction(auth *bind.TransactOpts, tx *gethTypes.Transaction, baseFee uint64) (*gethTypes.Transaction, error) {
func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
@@ -310,13 +366,14 @@ func (s *Sender) resubmitTransaction(auth *bind.TransactOpts, tx *gethTypes.Tran
"nonce": tx.Nonce(),
}
var feeData FeeData
feeData.gasLimit = tx.Gas()
switch s.config.TxType {
case LegacyTxType, AccessListTxType: // `LegacyTxType` is for the ganache mock node
originalGasPrice := tx.GasPrice()
gasPrice := new(big.Int).Mul(escalateMultipleNum, originalGasPrice)
originalGasPrice := feeData.gasPrice
gasPrice := new(big.Int).Mul(originalGasPrice, escalateMultipleNum)
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
if gasPrice.Cmp(feeData.gasPrice) < 0 {
gasPrice = feeData.gasPrice
}
if gasPrice.Cmp(maxGasPrice) > 0 {
gasPrice = maxGasPrice
}
@@ -330,16 +387,27 @@ func (s *Sender) resubmitTransaction(auth *bind.TransactOpts, tx *gethTypes.Tran
txInfo["original_gas_price"] = originalGasPrice.Uint64()
txInfo["adjusted_gas_price"] = gasPrice.Uint64()
default:
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalGasTipCap := new(big.Int).Set(feeData.gasTipCap)
originalGasFeeCap := new(big.Int).Set(feeData.gasFeeCap)
gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap = gasFeeCap.Div(gasFeeCap, escalateMultipleDen)
if gasFeeCap.Cmp(feeData.gasFeeCap) < 0 {
gasFeeCap = feeData.gasFeeCap
}
if gasTipCap.Cmp(feeData.gasTipCap) < 0 {
gasTipCap = feeData.gasTipCap
}
// adjust for rising basefee
adjBaseFee := new(big.Int).SetUint64(baseFee)
adjBaseFee := big.NewInt(0)
if feeGas := atomic.LoadUint64(&s.baseFeePerGas); feeGas != 0 {
adjBaseFee.SetUint64(feeGas)
}
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
currentGasFeeCap := new(big.Int).Add(gasTipCap, adjBaseFee)
@@ -352,11 +420,6 @@ func (s *Sender) resubmitTransaction(auth *bind.TransactOpts, tx *gethTypes.Tran
gasFeeCap = maxGasPrice
}
// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}
if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
@@ -375,11 +438,11 @@ func (s *Sender) resubmitTransaction(auth *bind.TransactOpts, tx *gethTypes.Tran
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
}
log.Info("Transaction gas adjustment details", "txInfo", txInfo)
nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
tx, err := s.createAndSendTx(&feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
tx, err := s.createAndSendTx(feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
if err != nil {
log.Error("failed to create and send tx (resubmit case)", "from", s.auth.From.String(), "nonce", nonce, "err", err)
return nil, err
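A worked example of the escalation arithmetic above, assuming EscalateMultipleNum=110 and EscalateMultipleDen=100, i.e. a 10% bump per resubmission (the real ratio comes from the sender config):

    // escalate scales a fee component by num/den using integer math.
    func escalate(v *big.Int, num, den uint64) *big.Int {
        out := new(big.Int).Mul(v, new(big.Int).SetUint64(num))
        return out.Div(out, new(big.Int).SetUint64(den))
    }

    // With a 2 gwei tip, a 30 gwei fee cap and a 40 gwei base fee:
    //   gasTipCap  = escalate(2 gwei)  = 2.2 gwei
    //   gasFeeCap  = escalate(30 gwei) = 33 gwei
    //   adjBaseFee = escalate(40 gwei) = 44 gwei
    // The fee cap is then raised to cover gasTipCap+adjBaseFee (46.2 gwei),
    // clamped to MaxGasPrice, and gasTipCap is capped at gasFeeCap.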
@@ -389,116 +452,137 @@ func (s *Sender) resubmitTransaction(auth *bind.TransactOpts, tx *gethTypes.Tran
// checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number.
// If a transaction hasn't been confirmed after a certain number of blocks, it will be resubmitted with an increased gas price.
func (s *Sender) checkPendingTransaction() {
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64) {
number := header.Number.Uint64()
atomic.StoreUint64(&s.blockNumber, number)
blockNumber, baseFee, err := s.getBlockNumberAndBaseFee(s.ctx)
if err != nil {
log.Error("failed to get block number and base fee", "error", err)
return
if s.config.TxType == DynamicFeeTxType {
if header.BaseFee != nil {
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
} else {
log.Error("DynamicFeeTxType not supported: header.BaseFee is nil")
}
}
transactionsToCheck, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(s.ctx, s.senderType, 100)
if err != nil {
log.Error("failed to load pending transactions", "sender meta", s.getSenderMeta(), "err", err)
return
}
confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
if err != nil {
log.Error("failed to get latest confirmed block number", "confirmations", s.config.Confirmations, "err", err)
return
}
for _, txnToCheck := range transactionsToCheck {
tx := new(gethTypes.Transaction)
if err := tx.DecodeRLP(rlp.NewStream(bytes.NewReader(txnToCheck.RLPEncoding), 0)); err != nil {
log.Error("failed to decode RLP", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "err", err)
for item := range s.pendingTxs.IterBuffered() {
key, pending := item.Key, item.Val
// skip nil placeholders: a nil value only reserves a pending task's ID
if pending == nil {
continue
}
receipt, err := s.client.TransactionReceipt(s.ctx, tx.Hash())
if (err == nil) && (receipt != nil) { // tx confirmed.
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
if receipt.BlockNumber.Uint64() <= confirmed {
err := s.db.Transaction(func(dbTX *gorm.DB) error {
// Update the status of the transaction to TxStatusConfirmed.
if err := s.pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(s.ctx, tx.Hash(), types.TxStatusConfirmed, dbTX); err != nil {
log.Error("failed to update transaction status by tx hash", "hash", tx.Hash().String(), "sender meta", s.getSenderMeta(), "from", s.auth.From.String(), "nonce", tx.Nonce(), "err", err)
return err
}
// Update other transactions with the same nonce and sender address as failed.
if err := s.pendingTransactionOrm.UpdateOtherTransactionsAsFailedByNonce(s.ctx, txnToCheck.SenderAddress, tx.Nonce(), tx.Hash(), dbTX); err != nil {
log.Error("failed to update other transactions as failed by nonce", "senderAddress", txnToCheck.SenderAddress, "nonce", tx.Nonce(), "excludedTxHash", tx.Hash(), "err", err)
return err
}
return nil
})
if err != nil {
log.Error("db transaction failed", "err", err)
}
s.pendingTxs.Remove(key)
// send confirm message
s.confirmCh <- &Confirmation{
ContextID: txnToCheck.ContextID,
IsSuccessful: receipt.Status == gethTypes.ReceiptStatusSuccessful,
TxHash: tx.Hash(),
SenderType: s.senderType,
ID: pending.id,
IsSuccessful: receipt.Status == types.ReceiptStatusSuccessful,
TxHash: pending.tx.Hash(),
}
}
} else if txnToCheck.Status == types.TxStatusPending && // Only try resubmitting a new transaction based on gas price of the last transaction (status pending) with same ContextID.
s.config.EscalateBlocks+txnToCheck.SubmitBlockNumber <= blockNumber {
// It's possible that the pending transaction was marked as failed earlier in this loop (e.g., if one of its replacements has already been confirmed).
// Therefore, we fetch the current transaction status again for accuracy before proceeding.
status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(s.ctx, tx.Hash())
if err != nil {
log.Error("failed to get transaction status by tx hash", "hash", tx.Hash().String(), "err", err)
return
}
if status == types.TxStatusConfirmedFailed {
log.Warn("transaction already marked as failed, skipping resubmission", "hash", tx.Hash().String())
return
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
log.Info("resubmit transaction",
"hash", tx.Hash().String(),
"from", s.auth.From.String(),
"nonce", tx.Nonce(),
"submitBlockNumber", txnToCheck.SubmitBlockNumber,
"currentBlockNumber", blockNumber,
"escalateBlocks", s.config.EscalateBlocks)
"hash", pending.tx.Hash().String(),
"from", pending.signer.From.String(),
"nonce", pending.tx.Nonce(),
"submit block number", pending.submitAt,
"current block number", number,
"configured escalateBlocks", s.config.EscalateBlocks)
if newTx, err := s.resubmitTransaction(s.auth, tx, baseFee); err != nil {
s.metrics.resubmitTransactionFailedTotal.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to resubmit transaction", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "from", s.auth.From.String(), "nonce", newTx.Nonce(), "err", err)
} else {
err := s.db.Transaction(func(dbTX *gorm.DB) error {
// Update the status of the original transaction as replaced, while still checking its confirmation status.
if err := s.pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(s.ctx, tx.Hash(), types.TxStatusReplaced, dbTX); err != nil {
return fmt.Errorf("failed to update status of transaction with hash %s to TxStatusReplaced, err: %w", tx.Hash().String(), err)
}
// Record the new transaction that has replaced the original one.
if err := s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, txnToCheck.ContextID, s.getSenderMeta(), newTx, txnToCheck.SubmitBlockNumber, dbTX); err != nil {
return fmt.Errorf("failed to insert new pending transaction with context ID: %s, nonce: %d, hash: %v, err: %w", txnToCheck.ContextID, newTx.Nonce(), newTx.Hash().String(), err)
}
return nil
})
if err != nil {
log.Error("db transaction failed", "err", err)
if tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx); err != nil {
// If the account pool is empty, it will retry in the next loop.
if !errors.Is(err, ErrNoAvailableAccount) {
log.Error("failed to resubmit transaction, reset submitAt", "tx hash", pending.tx.Hash().String(), "err", err)
}
// A nonce error means one of the older transactions has been confirmed.
// One scenario:
// 1. Initially, we replace the tx three times and submit it to the local node.
// We only keep the latest tx hash in memory.
// 2. Another node packs the 2nd or 3rd tx, and the local node has now received that block.
// 3. When we resubmit the 4th tx, we get a nonce error.
// 4. We need to check the status of the 3rd tx stored in our memory.
// 4.1 If the 3rd tx was packed, we get a receipt and the 3rd tx is marked as confirmed.
// 4.2 If the 2nd tx was packed, the `TransactionReceipt` call returns nothing. Since we
// cannot do anything about it, we just log some information. In this case, the caller
// of `sender.SendTransaction` should write extra code to handle the situation.
// Another scenario is a leaked private key: someone sends a transaction with the same nonce.
// Then we need to stop the program and handle the situation manually.
if strings.Contains(err.Error(), "nonce") {
// This key can be deleted
s.pendingTxs.Remove(key)
// Try to get the receipt of the latest replaced tx hash
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
if (err == nil) && (receipt != nil) {
// send confirm message
s.confirmCh <- &Confirmation{
ID: pending.id,
IsSuccessful: receipt.Status == types.ReceiptStatusSuccessful,
TxHash: pending.tx.Hash(),
}
} else {
// The receipt can be nil since the confirmed transaction may not be the latest one.
// We just ignore it; the caller of the sender should handle this situation.
log.Warn("Pending transaction is confirmed by one of the replaced transactions", "key", key, "signer", pending.signer.From, "nonce", pending.tx.Nonce())
}
}
} else {
// update the cached tx and refresh submitAt
pending.tx = tx
pending.submitAt = number
}
}
}
}
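A hedged sketch of the nonce-error recovery path described in the comment above: the latest replaced tx hash is probed once for a receipt, and if none exists an earlier replacement was mined, leaving reconciliation to the caller of SendTransaction (the helper name is hypothetical):

    // probeLatestReplacement checks whether the last replacement we know of
    // is the transaction that actually got mined after a nonce error.
    func probeLatestReplacement(ctx context.Context, client *ethclient.Client, tx *types.Transaction) (*types.Receipt, bool) {
        receipt, err := client.TransactionReceipt(ctx, tx.Hash())
        if err != nil || receipt == nil {
            return nil, false // an older replacement was mined; the caller must handle it
        }
        return receipt, true
    }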
// checkBalance checks the balance and prints an error log if the balance is under a threshold.
func (s *Sender) checkBalance(ctx context.Context) error {
bls, err := s.client.BalanceAt(ctx, s.auth.From, nil)
if err != nil {
log.Warn("failed to get balance", "address", s.auth.From.String(), "err", err)
return err
}
if bls.Cmp(s.minBalance) < 0 {
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s, address: %s",
bls.String(), s.minBalance.String(), s.auth.From.String())
}
return nil
}
// Loop is the main event loop
func (s *Sender) loop(ctx context.Context) {
checkTick := time.NewTicker(time.Duration(s.config.CheckPendingTime) * time.Second)
defer checkTick.Stop()
checkBalanceTicker := time.NewTicker(time.Duration(s.config.CheckBalanceTime) * time.Second)
defer checkBalanceTicker.Stop()
for {
select {
case <-checkTick.C:
s.checkPendingTransaction()
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
header, err := s.client.HeaderByNumber(s.ctx, nil)
if err != nil {
log.Error("failed to get latest head", "err", err)
continue
}
confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
if err != nil {
log.Error("failed to get latest confirmed block number", "err", err)
continue
}
s.checkPendingTransaction(header, confirmed)
case <-checkBalanceTicker.C:
s.metrics.senderCheckBalancerTotal.WithLabelValues(s.service, s.name).Inc()
// Check and set balance.
if err := s.checkBalance(ctx); err != nil {
log.Error("check balance error", "err", err)
}
case <-ctx.Done():
return
case <-s.stopCh:
@@ -506,29 +590,3 @@ func (s *Sender) loop(ctx context.Context) {
}
}
}
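For reference, every knob driving this loop and the resubmission logic comes from SenderConfig. A hypothetical sketch using only fields that appear in this diff; the values are illustrative, not defaults:

    cfgSketch := &config.SenderConfig{
        Endpoint:            "http://localhost:8545",
        TxType:              DynamicFeeTxType,
        Confirmations:       rpc.LatestBlockNumber,
        CheckPendingTime:    3,   // seconds between pending-tx scans
        CheckBalanceTime:    100, // seconds between balance checks
        EscalateBlocks:      4,   // resubmit if unconfirmed after this many blocks
        EscalateMultipleNum: 110, // 110/100 = +10% fee bump per resubmission
        EscalateMultipleDen: 100,
        MaxGasPrice:         10_000_000_000,
        MinBalance:          big.NewInt(1_000_000_000_000_000_000), // 1 ether in wei
        PendingLimit:        8,
    }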
func (s *Sender) getSenderMeta() *orm.SenderMeta {
return &orm.SenderMeta{
Name: s.name,
Service: s.service,
Address: s.auth.From,
Type: s.senderType,
}
}
func (s *Sender) getBlockNumberAndBaseFee(ctx context.Context) (uint64, uint64, error) {
header, err := s.client.HeaderByNumber(ctx, nil)
if err != nil {
return 0, 0, fmt.Errorf("failed to get header by number, err: %w", err)
}
var baseFeePerGas uint64
if s.config.TxType == DynamicFeeTxType {
if header.BaseFee != nil {
baseFeePerGas = header.BaseFee.Uint64()
} else {
return 0, 0, errors.New("dynamic fee tx type not supported: header.BaseFee is nil")
}
}
return header.Number.Uint64(), baseFeePerGas, nil
}

View File

@@ -1,75 +0,0 @@
package sender
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type senderMetrics struct {
senderCheckPendingTransactionTotal *prometheus.CounterVec
sendTransactionTotal *prometheus.CounterVec
sendTransactionFailureGetFee *prometheus.CounterVec
sendTransactionFailureSendTx *prometheus.CounterVec
resubmitTransactionTotal *prometheus.CounterVec
resubmitTransactionFailedTotal *prometheus.CounterVec
currentGasFeeCap *prometheus.GaugeVec
currentGasTipCap *prometheus.GaugeVec
currentGasPrice *prometheus.GaugeVec
currentGasLimit *prometheus.GaugeVec
}
var (
initSenderMetricOnce sync.Once
sm *senderMetrics
)
func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
initSenderMetricOnce.Do(func() {
sm = &senderMetrics{
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_total",
Help: "The total number of sending transactions.",
}, []string{"service", "name"}),
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_get_fee_failure_total",
Help: "The total number of sending transactions failure for getting fee.",
}, []string{"service", "name"}),
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_send_tx_failure_total",
Help: "The total number of sending transactions failure for sending tx.",
}, []string{"service", "name"}),
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_resubmit_send_transaction_total",
Help: "The total number of resubmit transactions.",
}, []string{"service", "name"}),
resubmitTransactionFailedTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_send_transaction_resubmit_send_transaction_failed_total",
Help: "The total number of failed resubmit transactions.",
}, []string{"service", "name"}),
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_fee_cap",
Help: "The gas fee cap of current transaction.",
}, []string{"service", "name"}),
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_tip_cap",
Help: "The gas tip cap of current transaction.",
}, []string{"service", "name"}),
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_price_cap",
Help: "The gas price of current transaction.",
}, []string{"service", "name"}),
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "rollup_sender_gas_limit",
Help: "The gas limit of current transaction.",
}, []string{"service", "name"}),
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "rollup_sender_check_pending_transaction_total",
Help: "The total number of check pending transaction.",
}, []string{"service", "name"}),
}
})
return sm
}
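A short usage note: the sync.Once above makes the metric set a process-wide singleton, so every Sender shares the same vectors and is distinguished purely by the ("service", "name") label pair. An illustrative call, with a placeholder registerer and label values:

    m := initSenderMetrics(prometheus.DefaultRegisterer)
    m.sendTransactionTotal.WithLabelValues("rollup", "commit-sender").Inc()
    m.currentGasFeeCap.WithLabelValues("rollup", "commit-sender").Set(2.2e9) // wei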

View File

@@ -4,41 +4,37 @@ import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"math/big"
"os"
"strconv"
"testing"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/mock_bridge"
"scroll-tech/rollup/internal/config"
)
const TXBatch = 50
var (
privateKey *ecdsa.PrivateKey
cfg *config.Config
base *docker.App
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
db *gorm.DB
mockL1ContractsAddress common.Address
privateKey *ecdsa.PrivateKey
cfg *config.Config
base *docker.App
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
scrollChainAddress common.Address
)
func TestMain(m *testing.M) {
@@ -57,26 +53,14 @@ func setupEnv(t *testing.T) {
var err error
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)
base.RunL1Geth(t)
priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212")
assert.NoError(t, err)
// Load default private key.
privateKey = priv
base.RunL1Geth(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
base.RunDBImage(t)
db, err = database.InitDB(
&database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfg.L1Config.RelayerConfig.SenderConfig.CheckBalanceTime = 1
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, base.L1gethImg.ChainID())
assert.NoError(t, err)
@@ -87,7 +71,7 @@ func setupEnv(t *testing.T) {
_, tx, _, err := mock_bridge.DeployMockBridgeL1(auth, l1Client)
assert.NoError(t, err)
mockL1ContractsAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
scrollChainAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
}
@@ -96,28 +80,22 @@ func TestSender(t *testing.T) {
setupEnv(t)
t.Run("test new sender", testNewSender)
t.Run("test pending limit", testPendLimit)
t.Run("test fallback gas limit", testFallbackGasLimit)
t.Run("test send and retrieve transaction", testSendAndRetrieveTransaction)
t.Run("test access list transaction gas limit", testAccessListTransactionGasLimit)
t.Run("test resubmit zero gas price transaction", testResubmitZeroGasPriceTransaction)
t.Run("test resubmit non-zero gas price transaction", testResubmitNonZeroGasPriceTransaction)
t.Run("test resubmit under priced transaction", testResubmitUnderpricedTransaction)
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
t.Run("test check pending transaction tx confirmed", testCheckPendingTransactionTxConfirmed)
t.Run("test check pending transaction resubmit tx confirmed", testCheckPendingTransactionResubmitTxConfirmed)
t.Run("test check pending transaction replaced tx confirmed", testCheckPendingTransactionReplacedTxConfirmed)
t.Run("test check pending transaction", testCheckPendingTransaction)
}
func testNewSender(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
// exit by Stop()
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy1.TxType = txType
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", nil)
assert.NoError(t, err)
newSender1.Stop()
@@ -125,57 +103,43 @@ func testNewSender(t *testing.T) {
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy2.TxType = txType
subCtx, cancel := context.WithCancel(context.Background())
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", nil)
assert.NoError(t, err)
cancel()
}
}
func testSendAndRetrieveTransaction(t *testing.T) {
for i, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
func testPendLimit(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
cfgCopy.Confirmations = rpc.LatestBlockNumber
cfgCopy.PendingLimit = 2
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
hash, err := s.SendTransaction("0", &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
assert.Len(t, txs, 1)
assert.Equal(t, "0", txs[0].ContextID)
assert.Equal(t, hash.String(), txs[0].Hash)
assert.Equal(t, uint8(i), txs[0].Type)
assert.Equal(t, types.TxStatusPending, txs[0].Status)
assert.Equal(t, "0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63", txs[0].SenderAddress)
assert.Equal(t, types.SenderTypeUnknown, txs[0].SenderType)
assert.Equal(t, "test", txs[0].SenderService)
assert.Equal(t, "test", txs[0].SenderName)
s.Stop()
for i := 0; i < 2*newSender.PendingLimit(); i++ {
_, err = newSender.SendTransaction(strconv.Itoa(i), &common.Address{}, big.NewInt(1), nil, 0)
assert.True(t, err == nil || (err != nil && err.Error() == "sender's pending pool is full"))
}
assert.True(t, newSender.PendingCount() <= newSender.PendingLimit())
newSender.Stop()
}
}
func testFallbackGasLimit(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
client, err := ethclient.Dial(cfgCopy.Endpoint)
assert.NoError(t, err)
// FallbackGasLimit = 0
txHash0, err := s.SendTransaction("0", &common.Address{}, big.NewInt(0), nil, 0)
txHash0, err := s.SendTransaction("0", &common.Address{}, big.NewInt(1), nil, 0)
assert.NoError(t, err)
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
assert.NoError(t, err)
@@ -183,12 +147,12 @@ func testFallbackGasLimit(t *testing.T) {
// FallbackGasLimit = 100000
patchGuard := gomonkey.ApplyPrivateMethod(s, "estimateGasLimit",
func(contract *common.Address, data []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, *gethTypes.AccessList, error) {
func(contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, *types.AccessList, error) {
return 0, nil, errors.New("estimateGasLimit error")
},
)
txHash1, err := s.SendTransaction("1", &common.Address{}, big.NewInt(0), nil, 100000)
txHash1, err := s.SendTransaction("1", &common.Address{}, big.NewInt(1), nil, 100000)
assert.NoError(t, err)
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
assert.NoError(t, err)
@@ -201,13 +165,9 @@ func testFallbackGasLimit(t *testing.T) {
func testResubmitZeroGasPriceTransaction(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
feeData := &FeeData{
gasPrice: big.NewInt(0),
@@ -219,7 +179,7 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, tx)
// Increase at least 1 wei in gas price, gas tip cap and gas fee cap.
_, err = s.resubmitTransaction(s.auth, tx, 0)
_, err = s.resubmitTransaction(feeData, s.auth, tx)
assert.NoError(t, err)
s.Stop()
}
@@ -227,13 +187,9 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
func testAccessListTransactionGasLimit(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
l2GasOracleABI, err := bridgeAbi.L2GasPriceOracleMetaData.GetAbi()
@@ -242,12 +198,12 @@ func testAccessListTransactionGasLimit(t *testing.T) {
data, err := l2GasOracleABI.Pack("setL2BaseFee", big.NewInt(2333))
assert.NoError(t, err)
gasLimit, accessList, err := s.estimateGasLimit(&mockL1ContractsAddress, data, big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(0), true)
gasLimit, accessList, err := s.estimateGasLimit(&scrollChainAddress, data, big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(0), true)
assert.NoError(t, err)
assert.Equal(t, uint64(43472), gasLimit)
assert.NotNil(t, accessList)
gasLimit, accessList, err = s.estimateGasLimit(&mockL1ContractsAddress, data, big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(0), false)
gasLimit, accessList, err = s.estimateGasLimit(&scrollChainAddress, data, big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(100000000000), big.NewInt(0), false)
assert.NoError(t, err)
assert.Equal(t, uint64(43949), gasLimit)
assert.Nil(t, accessList)
@@ -258,16 +214,12 @@ func testAccessListTransactionGasLimit(t *testing.T) {
func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
// Bump gas price, gas tip cap and gas fee cap just touch the minimum threshold of 10% (default config of geth).
cfgCopy.EscalateMultipleNum = 110
cfgCopy.EscalateMultipleDen = 100
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
feeData := &FeeData{
gasPrice: big.NewInt(100000),
@@ -278,7 +230,7 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
tx, err := s.createAndSendTx(feeData, &common.Address{}, big.NewInt(0), nil, nil)
assert.NoError(t, err)
assert.NotNil(t, tx)
_, err = s.resubmitTransaction(s.auth, tx, 0)
_, err = s.resubmitTransaction(feeData, s.auth, tx)
assert.NoError(t, err)
s.Stop()
}
@@ -286,16 +238,12 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
func testResubmitUnderpricedTransaction(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
// Bump gas price, gas tip cap and gas fee cap less than 10% (default config of geth).
cfgCopy.EscalateMultipleNum = 109
cfgCopy.EscalateMultipleDen = 100
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
feeData := &FeeData{
gasPrice: big.NewInt(100000),
@@ -306,29 +254,27 @@ func testResubmitUnderpricedTransaction(t *testing.T) {
tx, err := s.createAndSendTx(feeData, &common.Address{}, big.NewInt(0), nil, nil)
assert.NoError(t, err)
assert.NotNil(t, tx)
_, err = s.resubmitTransaction(s.auth, tx, 0)
_, err = s.resubmitTransaction(feeData, s.auth, tx)
assert.Error(t, err, "replacement transaction underpriced")
s.Stop()
}
}
func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
txType := "DynamicFeeTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
s.baseFeePerGas = 1000
feeData, err := s.getFeeData(&common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
tx := gethTypes.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 21000, big.NewInt(0), nil)
baseFeePerGas := uint64(1000)
// bump the basefee by 10x
baseFeePerGas *= 10
s.baseFeePerGas *= 10
// resubmit and check that the gas fee has been adjusted accordingly
newTx, err := s.resubmitTransaction(s.auth, tx, baseFeePerGas)
newTx, err := s.resubmitTransaction(feeData, s.auth, tx)
assert.NoError(t, err)
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
@@ -336,171 +282,104 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
adjBaseFee := new(big.Int)
adjBaseFee.SetUint64(baseFeePerGas)
adjBaseFee.SetUint64(s.baseFeePerGas)
adjBaseFee = adjBaseFee.Mul(adjBaseFee, escalateMultipleNum)
adjBaseFee = adjBaseFee.Div(adjBaseFee, escalateMultipleDen)
expectedGasFeeCap := new(big.Int).Add(tx.GasTipCap(), adjBaseFee)
expectedGasFeeCap := new(big.Int).Add(feeData.gasTipCap, adjBaseFee)
if expectedGasFeeCap.Cmp(maxGasPrice) > 0 {
expectedGasFeeCap = maxGasPrice
}
assert.Equal(t, expectedGasFeeCap.Int64(), newTx.GasFeeCap().Int64())
s.Stop()
}
func testCheckPendingTransactionTxConfirmed(t *testing.T) {
func testCheckPendingTransaction(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
_, err = s.SendTransaction("test", &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}
confirmed := uint64(100)
receipt := &types.Receipt{Status: types.ReceiptStatusSuccessful, BlockNumber: big.NewInt(90)}
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
assert.Len(t, txs, 1)
assert.Equal(t, types.TxStatusPending, txs[0].Status)
assert.Equal(t, types.SenderTypeCommitBatch, txs[0].SenderType)
testCases := []struct {
name string
receipt *types.Receipt
receiptErr error
resubmitErr error
expectedCount int
expectedFound bool
}{
{
name: "Normal case, transaction receipt exists and successful",
receipt: receipt,
receiptErr: nil,
resubmitErr: nil,
expectedCount: 0,
expectedFound: false,
},
{
name: "Resubmit case, resubmitTransaction error (not nonce) case",
receipt: receipt,
receiptErr: errors.New("receipt error"),
resubmitErr: errors.New("resubmit error"),
expectedCount: 1,
expectedFound: true,
},
{
name: "Resubmit case, resubmitTransaction success case",
receipt: receipt,
receiptErr: errors.New("receipt error"),
resubmitErr: nil,
expectedCount: 1,
expectedFound: true,
},
}
patchGuard := gomonkey.ApplyMethodFunc(s.client, "TransactionReceipt", func(_ context.Context, hash common.Hash) (*gethTypes.Receipt, error) {
return &gethTypes.Receipt{TxHash: hash, BlockNumber: big.NewInt(0), Status: gethTypes.ReceiptStatusSuccessful}, nil
})
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var c *ethclient.Client
patchGuard := gomonkey.ApplyMethodFunc(c, "TransactionReceipt", func(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
return tc.receipt, tc.receiptErr
})
patchGuard.ApplyPrivateMethod(s, "resubmitTransaction",
func(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {
return tx, tc.resubmitErr
},
)
s.checkPendingTransaction()
assert.NoError(t, err)
pendingTx := &PendingTransaction{id: "abc", tx: tx, signer: s.auth, submitAt: header.Number.Uint64() - s.config.EscalateBlocks - 1}
s.pendingTxs.Set(pendingTx.id, pendingTx)
s.checkPendingTransaction(header, confirmed)
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
assert.Len(t, txs, 0)
if tc.receiptErr == nil {
expectedConfirmation := &Confirmation{
ID: pendingTx.id,
IsSuccessful: tc.receipt.Status == types.ReceiptStatusSuccessful,
TxHash: pendingTx.tx.Hash(),
}
actualConfirmation := <-s.confirmCh
assert.Equal(t, expectedConfirmation, actualConfirmation)
}
if tc.expectedFound && tc.resubmitErr == nil {
actualPendingTx, found := s.pendingTxs.Get(pendingTx.id)
assert.Equal(t, true, found)
assert.Equal(t, header.Number.Uint64(), actualPendingTx.submitAt)
}
_, found := s.pendingTxs.Get(pendingTx.id)
assert.Equal(t, tc.expectedFound, found)
assert.Equal(t, tc.expectedCount, s.pendingTxs.Count())
patchGuard.Reset()
})
}
s.Stop()
patchGuard.Reset()
}
}
func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeFinalizeBatch, db, nil)
assert.NoError(t, err)
originTxHash, err := s.SendTransaction("test", &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
assert.Len(t, txs, 1)
assert.Equal(t, types.TxStatusPending, txs[0].Status)
assert.Equal(t, types.SenderTypeFinalizeBatch, txs[0].SenderType)
patchGuard := gomonkey.ApplyMethodFunc(s.client, "TransactionReceipt", func(_ context.Context, hash common.Hash) (*gethTypes.Receipt, error) {
if hash == originTxHash {
return nil, fmt.Errorf("simulated transaction receipt error")
}
return &gethTypes.Receipt{TxHash: hash, BlockNumber: big.NewInt(0), Status: gethTypes.ReceiptStatusSuccessful}, nil
})
// Attempt to resubmit the transaction.
s.checkPendingTransaction()
assert.NoError(t, err)
status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), originTxHash)
assert.NoError(t, err)
assert.Equal(t, types.TxStatusReplaced, status)
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 2)
assert.NoError(t, err)
assert.Len(t, txs, 2)
assert.Equal(t, types.TxStatusReplaced, txs[0].Status)
assert.Equal(t, types.TxStatusPending, txs[1].Status)
// Check the pending transactions again after attempting to resubmit.
s.checkPendingTransaction()
assert.NoError(t, err)
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
assert.Len(t, txs, 0)
s.Stop()
patchGuard.Reset()
}
}
func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
for _, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
assert.NoError(t, err)
txHash, err := s.SendTransaction("test", &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
assert.Len(t, txs, 1)
assert.Equal(t, types.TxStatusPending, txs[0].Status)
assert.Equal(t, types.SenderTypeL1GasOracle, txs[0].SenderType)
patchGuard := gomonkey.ApplyMethodFunc(s.client, "TransactionReceipt", func(_ context.Context, hash common.Hash) (*gethTypes.Receipt, error) {
var status types.TxStatus
status, err = s.pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), hash)
if err != nil {
return nil, fmt.Errorf("failed to get transaction status, hash: %s, err: %w", hash.String(), err)
}
// If the transaction status is 'replaced', return a successful receipt.
if status == types.TxStatusReplaced {
return &gethTypes.Receipt{
TxHash: hash,
BlockNumber: big.NewInt(0),
Status: gethTypes.ReceiptStatusSuccessful,
}, nil
}
return nil, fmt.Errorf("simulated transaction receipt error")
})
// Attempt to resubmit the transaction.
s.checkPendingTransaction()
assert.NoError(t, err)
status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), txHash)
assert.NoError(t, err)
assert.Equal(t, types.TxStatusReplaced, status)
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 2)
assert.NoError(t, err)
assert.Len(t, txs, 2)
assert.Equal(t, types.TxStatusReplaced, txs[0].Status)
assert.Equal(t, types.TxStatusPending, txs[1].Status)
// Check the pending transactions again after attempting to resubmit.
s.checkPendingTransaction()
assert.NoError(t, err)
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
assert.Len(t, txs, 0)
s.Stop()
patchGuard.Reset()
}
}

View File

@@ -22,7 +22,6 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/types"
cutils "scroll-tech/common/utils"
bridgeAbi "scroll-tech/rollup/abi"
@@ -52,7 +51,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.GasOracleSenderPrivateKey, "test", "test", types.SenderTypeUnknown, db, nil)
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.GasOracleSenderPrivateKey, "test", "test", nil)
assert.NoError(t, err)
// Create several transactions and commit to block

View File

@@ -3,29 +3,27 @@ package orm
import (
"context"
"encoding/json"
"math/big"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
)
var (
base *docker.App
db *gorm.DB
l2BlockOrm *L2Block
chunkOrm *Chunk
batchOrm *Batch
pendingTransactionOrm *PendingTransaction
db *gorm.DB
l2BlockOrm *L2Block
chunkOrm *Chunk
batchOrm *Batch
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
@@ -62,7 +60,6 @@ func setupEnv(t *testing.T) {
batchOrm = NewBatch(db)
chunkOrm = NewChunk(db)
l2BlockOrm = NewL2Block(db)
pendingTransactionOrm = NewPendingTransaction(db)
templateBlockTrace, err := os.ReadFile("../../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
@@ -313,85 +310,3 @@ func TestBatchOrm(t *testing.T) {
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
}
func TestTransactionOrm(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
tx0 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
Nonce: 0,
To: &common.Address{},
Data: []byte{},
Gas: 21000,
AccessList: gethTypes.AccessList{},
Value: big.NewInt(0),
ChainID: big.NewInt(1),
GasTipCap: big.NewInt(0),
GasFeeCap: big.NewInt(1),
V: big.NewInt(0),
R: big.NewInt(0),
S: big.NewInt(0),
})
tx1 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
Nonce: 0,
To: &common.Address{},
Data: []byte{},
Gas: 42000,
AccessList: gethTypes.AccessList{},
Value: big.NewInt(0),
ChainID: big.NewInt(1),
GasTipCap: big.NewInt(1),
GasFeeCap: big.NewInt(2),
V: big.NewInt(0),
R: big.NewInt(0),
S: big.NewInt(0),
})
senderMeta := &SenderMeta{
Name: "testName",
Service: "testService",
Address: common.HexToAddress("0x1"),
Type: types.SenderTypeCommitBatch,
}
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx0, 0)
assert.NoError(t, err)
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx1, 0)
assert.NoError(t, err)
err = pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(context.Background(), tx0.Hash(), types.TxStatusReplaced)
assert.NoError(t, err)
txs, err := pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2)
assert.NoError(t, err)
assert.Len(t, txs, 2)
assert.Equal(t, tx1.Type(), txs[1].Type)
assert.Equal(t, tx1.Nonce(), txs[1].Nonce)
assert.Equal(t, tx1.Gas(), txs[1].GasLimit)
assert.Equal(t, tx1.GasTipCap().Uint64(), txs[1].GasTipCap)
assert.Equal(t, tx1.GasFeeCap().Uint64(), txs[1].GasFeeCap)
assert.Equal(t, tx1.ChainId().Uint64(), txs[1].ChainID)
assert.Equal(t, senderMeta.Name, txs[1].SenderName)
assert.Equal(t, senderMeta.Service, txs[1].SenderService)
assert.Equal(t, senderMeta.Address.String(), txs[1].SenderAddress)
assert.Equal(t, senderMeta.Type, txs[1].SenderType)
err = pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(context.Background(), tx1.Hash(), types.TxStatusConfirmed)
assert.NoError(t, err)
txs, err = pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2)
assert.NoError(t, err)
assert.Len(t, txs, 1)
err = pendingTransactionOrm.UpdateOtherTransactionsAsFailedByNonce(context.Background(), senderMeta.Address.String(), tx1.Nonce(), tx1.Hash())
assert.NoError(t, err)
txs, err = pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2)
assert.NoError(t, err)
assert.Len(t, txs, 0)
status, err := pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), tx0.Hash())
assert.NoError(t, err)
assert.Equal(t, types.TxStatusConfirmedFailed, status)
}

View File

@@ -1,155 +0,0 @@
package orm
import (
"bytes"
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// SenderMeta holds the metadata for a transaction sender including the name, service, address and type.
type SenderMeta struct {
Name string
Service string
Address common.Address
Type types.SenderType
}
// PendingTransaction represents the structure of a transaction in the database.
type PendingTransaction struct {
db *gorm.DB `gorm:"column:-"`
ID uint `json:"id" gorm:"id;primaryKey"`
ContextID string `json:"context_id" gorm:"context_id"`
Hash string `json:"hash" gorm:"hash"`
ChainID uint64 `json:"chain_id" gorm:"chain_id"`
Type uint8 `json:"type" gorm:"type"`
GasTipCap uint64 `json:"gas_tip_cap" gorm:"gas_tip_cap"`
GasFeeCap uint64 `json:"gas_fee_cap" gorm:"gas_fee_cap"`
GasLimit uint64 `json:"gas_limit" gorm:"gas_limit"`
Nonce uint64 `json:"nonce" gorm:"nonce"`
SubmitBlockNumber uint64 `json:"submit_block_number" gorm:"submit_block_number"`
Status types.TxStatus `json:"status" gorm:"status"`
RLPEncoding []byte `json:"rlp_encoding" gorm:"rlp_encoding"`
SenderName string `json:"sender_name" gorm:"sender_name"`
SenderService string `json:"sender_service" gorm:"sender_service"`
SenderAddress string `json:"sender_address" gorm:"sender_address"`
SenderType types.SenderType `json:"sender_type" gorm:"sender_type"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the PendingTransaction model.
func (*PendingTransaction) TableName() string {
return "pending_transaction"
}
// NewPendingTransaction returns a new instance of PendingTransaction.
func NewPendingTransaction(db *gorm.DB) *PendingTransaction {
return &PendingTransaction{db: db}
}
// GetTxStatusByTxHash retrieves the status of a transaction by its hash.
func (o *PendingTransaction) GetTxStatusByTxHash(ctx context.Context, hash common.Hash) (types.TxStatus, error) {
var status types.TxStatus
db := o.db.WithContext(ctx)
db = db.Model(&PendingTransaction{})
db = db.Select("status")
db = db.Where("hash = ?", hash.String())
if err := db.Scan(&status).Error; err != nil {
return types.TxStatusUnknown, fmt.Errorf("failed to get tx status by hash, hash: %v, err: %w", hash, err)
}
return status, nil
}
// GetPendingOrReplacedTransactionsBySenderType retrieves pending or replaced transactions filtered by sender type, ordered by nonce, then gas_fee_cap (gas_price in legacy tx), and limited to a specified count.
func (o *PendingTransaction) GetPendingOrReplacedTransactionsBySenderType(ctx context.Context, senderType types.SenderType, limit int) ([]PendingTransaction, error) {
var transactions []PendingTransaction
db := o.db.WithContext(ctx)
db = db.Model(&PendingTransaction{})
db = db.Where("sender_type = ?", senderType)
db = db.Where("status = ? OR status = ?", types.TxStatusPending, types.TxStatusReplaced)
db = db.Order("nonce asc")
db = db.Order("gas_fee_cap asc")
db = db.Limit(limit)
if err := db.Find(&transactions).Error; err != nil {
return nil, fmt.Errorf("failed to get pending or replaced transactions by sender type, error: %w", err)
}
return transactions, nil
}
// InsertPendingTransaction creates a new pending transaction record and stores it in the database.
func (o *PendingTransaction) InsertPendingTransaction(ctx context.Context, contextID string, senderMeta *SenderMeta, tx *gethTypes.Transaction, submitBlockNumber uint64, dbTX ...*gorm.DB) error {
rlp := new(bytes.Buffer)
if err := tx.EncodeRLP(rlp); err != nil {
return fmt.Errorf("failed to encode rlp, err: %w", err)
}
newTransaction := &PendingTransaction{
ContextID: contextID,
Hash: tx.Hash().String(),
Type: tx.Type(),
ChainID: tx.ChainId().Uint64(),
GasFeeCap: tx.GasFeeCap().Uint64(),
GasTipCap: tx.GasTipCap().Uint64(),
GasLimit: tx.Gas(),
Nonce: tx.Nonce(),
SubmitBlockNumber: submitBlockNumber,
Status: types.TxStatusPending,
RLPEncoding: rlp.Bytes(),
SenderName: senderMeta.Name,
SenderAddress: senderMeta.Address.String(),
SenderService: senderMeta.Service,
SenderType: senderMeta.Type,
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&PendingTransaction{})
if err := db.Create(newTransaction).Error; err != nil {
return fmt.Errorf("failed to InsertTransaction, error: %w", err)
}
return nil
}
// UpdatePendingTransactionStatusByTxHash updates the status of a transaction based on the transaction hash.
func (o *PendingTransaction) UpdatePendingTransactionStatusByTxHash(ctx context.Context, hash common.Hash, status types.TxStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&PendingTransaction{})
db = db.Where("hash = ?", hash.String())
if err := db.Update("status", status).Error; err != nil {
return fmt.Errorf("failed to UpdatePendingTransactionStatusByTxHash, txHash: %s, error: %w", hash, err)
}
return nil
}
// UpdateOtherTransactionsAsFailedByNonce updates the status of all transactions to TxStatusConfirmedFailed for a specific nonce and sender address, excluding a specified transaction hash.
func (o *PendingTransaction) UpdateOtherTransactionsAsFailedByNonce(ctx context.Context, senderAddress string, nonce uint64, hash common.Hash, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&PendingTransaction{})
db = db.Where("sender_address = ?", senderAddress)
db = db.Where("nonce = ?", nonce)
db = db.Where("hash != ?", hash.String())
if err := db.Update("status", types.TxStatusConfirmedFailed).Error; err != nil {
return fmt.Errorf("failed to update other transactions as failed by nonce, senderAddress: %s, nonce: %d, txHash: %s, error: %w", senderAddress, nonce, hash, err)
}
return nil
}