Compare commits

..

25 Commits

Author  SHA1  Message  Date
Mengran Lan  dbaf8531b1  upgrade snark-verifier-sdk  2024-05-30 22:35:56 +08:00
Mengran Lan  0ca302a654  upgrade to rc4  2024-05-30 22:09:11 +08:00
Mengran Lan  124d10820a  enable gzip when sending http request  2024-05-30 14:44:18 +08:00
Mengran Lan  614a894aa1  add sleep logic when failed to fetch task from coordinator  2024-05-28 15:19:54 +08:00
Mengran Lan  c4f54da7ca  upgrade zk-circuits to v0.11.0rc2  2024-05-28 10:31:58 +08:00
Mengran Lan  46f5849ae0  comment types for next  2024-05-27 17:24:31 +08:00
Mengran Lan  45d8f66864  update cargo depends  2024-05-27 17:19:02 +08:00
Mengran Lan  94e1ea3a08  change prover_next version to fit the e2e test  2024-05-27 16:53:44 +08:00
Mengran Lan  62c1f00d3b  copy libzktrie.so to lib dir  2024-05-27 15:54:49 +08:00
Mengran Lan  038d7a5bbf  tmp commit, test next handler wrapper logic (set next handler as default)  2024-05-26 23:13:11 +08:00
Mengran Lan  112e9ac42b  add task_cache logic  2024-05-24 13:07:10 +08:00
Mengran Lan  728266ebad  add info logs for circuits handler  2024-05-23 11:31:09 +08:00
Mengran Lan  7b8f30d230  add second zkevm-handler && add proof_check when proving batch  2024-05-22 18:47:06 +08:00
Mengran Lan  69ca648c83  utilize proof_status logic  2024-05-22 15:50:06 +08:00
Mengran Lan  00a07a8258  build using --rlease && fix bug in proof status  2024-05-22 11:41:28 +08:00
Mengran Lan  f87e5b5ca7  fix bug, action not taken if re-login to coordinator  2024-05-21 23:48:29 +08:00
Mengran Lan  7b848f971b  fmt code  2024-05-21 12:08:31 +08:00
Mengran Lan  49166ec8d0  change l2geth config to option  2024-05-21 12:06:55 +08:00
Mengran Lan  2d0c36eb5a  geth client add tokio runtime  2024-05-20 22:47:46 +08:00
Mengran Lan  445a8d592a  unify coordinator client api, add logs  2024-05-20 19:03:18 +08:00
Mengran Lan  eadc51d33b  set vk in get task request  2024-05-20 16:18:00 +08:00
Mengran Lan  254a7faf58  init the log; add tokio runtime  2024-05-20 16:15:06 +08:00
Mengran Lan  173cbc4dc4  first compile-ready version  2024-05-16 11:17:16 +08:00
Mengran Lan  94bd5917ba  finish most logic, leaving some rust-style compiler issue to be solved  2024-05-15 14:28:01 +08:00
Mengran Lan  107aa5792b  tmp save  2024-05-13 15:59:06 +08:00
98 changed files with 7597 additions and 1588 deletions

View File

@@ -34,7 +34,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
@@ -48,7 +48,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Test
run: |
make test
@@ -67,7 +67,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/bridge-history-api/ -w .

View File

@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- name: check diff

View File

@@ -37,7 +37,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
@@ -56,7 +56,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -81,7 +81,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

View File

@@ -31,7 +31,7 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
submodules: recursive
@@ -98,7 +98,7 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
submodules: recursive

View File

@@ -41,7 +41,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
working-directory: 'coordinator'
run: |
@@ -56,7 +56,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -77,7 +77,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v4
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - name: Build and push
@@ -97,7 +97,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

View File

@@ -34,7 +34,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
working-directory: 'database'
run: |
@@ -49,7 +49,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -74,7 +74,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

View File

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -58,7 +58,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -103,7 +103,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -148,7 +148,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -193,7 +193,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -238,7 +238,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -283,7 +283,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -328,7 +328,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx

View File

@@ -24,7 +24,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:

View File

@@ -33,7 +33,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -60,7 +60,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -87,7 +87,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -114,7 +114,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -140,7 +140,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -166,7 +166,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -193,7 +193,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx

View File

@@ -34,7 +34,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
@@ -58,7 +58,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
@@ -75,7 +75,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
@@ -89,7 +89,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover/ -w .

View File

@@ -36,7 +36,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
@@ -60,7 +60,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- name: Run goimports lint
@@ -85,7 +85,7 @@ jobs:
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Install Solc
uses: supplypike/setup-bin@v3
with:
@@ -117,7 +117,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v4
# uses: actions/checkout@v2
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v2
# - run: make docker

.gitignore vendored (2 changes)
View File

@@ -20,3 +20,5 @@ coverage.txt
# misc
sftp-config.json
*~
target

View File

@@ -44,8 +44,8 @@ fmt: ## format the code
dev_docker: ## build docker images for development/testing usages
docker pull postgres
docker build -t scroll_l1geth ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth ./common/testcontainers/docker/l2geth/
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
clean: ## Empty out the bin folder
@rm -rf build/bin

File diff suppressed because one or more lines are too long

View File

@@ -26,27 +26,32 @@ func init() {
Name: "reset",
Usage: "Clean and reset database.",
Action: resetDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "status",
Usage: "Check migration status.",
Action: checkDBStatus,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "version",
Usage: "Display the current database version.",
Action: dbVersion,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: migrateDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: rollbackDB,
Flags: []cli.Flag{
&utils.ConfigFileFlag,
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",

View File

@@ -18,8 +18,7 @@
"PufferGatewayAddr": "0xA033Ff09f2da45f0e9ae495f525363722Df42b2a",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
},
"L2": {
"confirmation": 0,
@@ -38,8 +37,7 @@
"DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
"MessageQueueAddr": "0x5300000000000000000000000000000000000000"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

View File

@@ -29,7 +29,6 @@ type FetcherConfig struct {
ScrollChainAddr string `json:"ScrollChainAddr"`
GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"`
BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
}
// RedisConfig redis config

View File

@@ -8,17 +8,8 @@ import (
)
var (
// TxsByAddressCtl the TxsByAddressController instance
TxsByAddressCtl *TxsByAddressController
// TxsByHashesCtl the TxsByHashesController instance
TxsByHashesCtl *TxsByHashesController
// L2UnclaimedWithdrawalsByAddressCtl the L2UnclaimedWithdrawalsByAddressController instance
L2UnclaimedWithdrawalsByAddressCtl *L2UnclaimedWithdrawalsByAddressController
// L2WithdrawalsByAddressCtl the L2WithdrawalsByAddressController instance
L2WithdrawalsByAddressCtl *L2WithdrawalsByAddressController
// HistoryCtrler is controller instance
HistoryCtrler *HistoryController
initControllerOnce sync.Once
)
@@ -26,9 +17,6 @@ var (
// InitController inits Controller with database
func InitController(db *gorm.DB, redis *redis.Client) {
initControllerOnce.Do(func() {
TxsByAddressCtl = NewTxsByAddressController(db, redis)
TxsByHashesCtl = NewTxsByHashesController(db, redis)
L2UnclaimedWithdrawalsByAddressCtl = NewL2UnclaimedWithdrawalsByAddressController(db, redis)
L2WithdrawalsByAddressCtl = NewL2WithdrawalsByAddressController(db, redis)
HistoryCtrler = NewHistoryController(db, redis)
})
}
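The five controller singletons collapse into one here, but the sync.Once guard is kept, so concurrent callers of InitController still construct the controller exactly once. A minimal sketch of that idiom; the names below are illustrative, not from the repo.

package main

import (
    "fmt"
    "sync"
)

// historyController stands in for the real *HistoryController singleton.
type historyController struct{ name string }

var (
    historyCtrl *historyController
    initOnce    sync.Once
)

// initController builds the singleton on the first call; later calls are no-ops.
func initController() {
    initOnce.Do(func() {
        historyCtrl = &historyController{name: "history"}
        fmt.Println("initialized once")
    })
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            initController()
        }()
    }
    wg.Wait()
    fmt.Println(historyCtrl.name) // "initialized once" was printed a single time
}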

View File

@@ -0,0 +1,94 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// HistoryController contains the query claimable txs service
type HistoryController struct {
historyLogic *logic.HistoryLogic
}
// NewHistoryController return HistoryController instance
func NewHistoryController(db *gorm.DB, redis *redis.Client) *HistoryController {
return &HistoryController{
historyLogic: logic.NewHistoryLogic(db, redis),
}
}
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
func (c *HistoryController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// GetL2WithdrawalsByAddress defines the http get method behavior
func (c *HistoryController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// GetTxsByAddress defines the http get method behavior
func (c *HistoryController) GetTxsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}
// PostQueryTxsByHashes defines the http post method behavior
func (c *HistoryController) PostQueryTxsByHashes(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
return
}
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}
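With the four per-endpoint controllers deleted (see the removed files below), every route can point at this one HistoryController. A self-contained sketch of mounting such a controller on gin routes: the handler names mirror the file above, while the route paths, stub bodies, and port are assumptions.

package main

import (
    "github.com/gin-gonic/gin"
)

// historyController mimics the shape of the diff's HistoryController: every
// endpoint is a method with the gin.HandlerFunc signature.
type historyController struct{}

func (c *historyController) GetTxsByAddress(ctx *gin.Context)                    { ctx.JSON(200, gin.H{"ok": true}) }
func (c *historyController) GetL2WithdrawalsByAddress(ctx *gin.Context)          { ctx.JSON(200, gin.H{"ok": true}) }
func (c *historyController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) { ctx.JSON(200, gin.H{"ok": true}) }
func (c *historyController) PostQueryTxsByHashes(ctx *gin.Context)               { ctx.JSON(200, gin.H{"ok": true}) }

func main() {
    c := &historyController{}
    r := gin.Default()
    // Route paths are illustrative; only the handler names mirror the diff.
    r.GET("/api/txs", c.GetTxsByAddress)
    r.GET("/api/l2/withdrawals", c.GetL2WithdrawalsByAddress)
    r.GET("/api/l2/unclaimed/withdrawals", c.GetL2UnclaimedWithdrawalsByAddress)
    r.POST("/api/txsbyhashes", c.PostQueryTxsByHashes)
    _ = r.Run(":8080") // assumed port
}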

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// L2UnclaimedWithdrawalsByAddressController the controller of GetL2UnclaimedWithdrawalsByAddress
type L2UnclaimedWithdrawalsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewL2UnclaimedWithdrawalsByAddressController create new L2UnclaimedWithdrawalsByAddressController
func NewL2UnclaimedWithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2UnclaimedWithdrawalsByAddressController {
return &L2UnclaimedWithdrawalsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetL2UnclaimedWithdrawalsByAddress defines the http get method behavior
func (c *L2UnclaimedWithdrawalsByAddressController) GetL2UnclaimedWithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2UnclaimedWithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2ClaimableWithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// L2WithdrawalsByAddressController the controller of GetL2WithdrawalsByAddress
type L2WithdrawalsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewL2WithdrawalsByAddressController create new L2WithdrawalsByAddressController
func NewL2WithdrawalsByAddressController(db *gorm.DB, redisClient *redis.Client) *L2WithdrawalsByAddressController {
return &L2WithdrawalsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetL2WithdrawalsByAddress defines the http get method behavior
func (c *L2WithdrawalsByAddressController) GetL2WithdrawalsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetL2WithdrawalsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetL2WithdrawalsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// TxsByAddressController the controller of GetTxsByAddress
type TxsByAddressController struct {
historyLogic *logic.HistoryLogic
}
// NewTxsByAddressController create new TxsByAddressController
func NewTxsByAddressController(db *gorm.DB, redisClient *redis.Client) *TxsByAddressController {
return &TxsByAddressController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// GetTxsByAddress defines the http get method behavior
func (c *TxsByAddressController) GetTxsByAddress(ctx *gin.Context) {
var req types.QueryByAddressRequest
if err := ctx.ShouldBind(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
pagedTxs, total, err := c.historyLogic.GetTxsByAddress(ctx, req.Address, req.Page, req.PageSize)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsError, err)
return
}
resultData := &types.ResultData{Results: pagedTxs, Total: total}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -1,40 +0,0 @@
package api
import (
"github.com/gin-gonic/gin"
"github.com/go-redis/redis/v8"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/logic"
"scroll-tech/bridge-history-api/internal/types"
)
// TxsByHashesController the controller of PostQueryTxsByHashes
type TxsByHashesController struct {
historyLogic *logic.HistoryLogic
}
// NewTxsByHashesController create a new TxsByHashesController
func NewTxsByHashesController(db *gorm.DB, redisClient *redis.Client) *TxsByHashesController {
return &TxsByHashesController{
historyLogic: logic.NewHistoryLogic(db, redisClient),
}
}
// PostQueryTxsByHashes query the txs by hashes
func (c *TxsByHashesController) PostQueryTxsByHashes(ctx *gin.Context) {
var req types.QueryByHashRequest
if err := ctx.ShouldBindJSON(&req); err != nil {
types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
return
}
results, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
if err != nil {
types.RenderFailure(ctx, types.ErrGetTxsByHashError, err)
return
}
resultData := &types.ResultData{Results: results, Total: uint64(len(results))}
types.RenderSuccess(ctx, resultData)
}

View File

@@ -63,7 +63,7 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
// Start starts the L1 message fetching process.
func (c *L1MessageFetcher) Start() {
messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
messageSyncedHeight, batchSyncedHeight, dbErr := c.eventUpdateLogic.GetL1SyncHeight(c.ctx)
if dbErr != nil {
log.Crit("L1MessageFetcher start failed", "err", dbErr)
}
@@ -72,11 +72,6 @@ func (c *L1MessageFetcher) Start() {
if batchSyncedHeight > l1SyncHeight {
l1SyncHeight = batchSyncedHeight
}
if bridgeBatchDepositSyncedHeight > l1SyncHeight {
l1SyncHeight = bridgeBatchDepositSyncedHeight
}
if c.cfg.StartHeight > l1SyncHeight {
l1SyncHeight = c.cfg.StartHeight - 1
}
@@ -96,13 +91,7 @@ func (c *L1MessageFetcher) Start() {
c.updateL1SyncHeight(l1SyncHeight, header.Hash())
log.Info("Start L1 message fetcher",
"message synced height", messageSyncedHeight,
"batch synced height", batchSyncedHeight,
"bridge batch deposit height", bridgeBatchDepositSyncedHeight,
"config start height", c.cfg.StartHeight,
"sync start height", c.l1SyncHeight+1,
)
log.Info("Start L1 message fetcher", "message synced height", messageSyncedHeight, "batch synced height", batchSyncedHeight, "config start height", c.cfg.StartHeight, "sync start height", c.l1SyncHeight+1)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {

View File

@@ -64,17 +64,13 @@ func NewL2MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
// Start starts the L2 message fetching process.
func (c *L2MessageFetcher) Start() {
l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
l2SentMessageSyncedHeight, dbErr := c.eventUpdateLogic.GetL2MessageSyncedHeightInDB(c.ctx)
if dbErr != nil {
log.Crit("failed to get L2 cross message processed height", "err", dbErr)
return
}
l2SyncHeight := l2SentMessageSyncedHeight
if l2BridgeBatchDepositSyncedHeight > l2SyncHeight {
l2SyncHeight = l2BridgeBatchDepositSyncedHeight
}
// Sync from an older block to prevent reorg during restart.
if l2SyncHeight < logic.L2ReorgSafeDepth {
l2SyncHeight = 0
@@ -90,8 +86,7 @@ func (c *L2MessageFetcher) Start() {
c.updateL2SyncHeight(l2SyncHeight, header.Hash())
log.Info("Start L2 message fetcher", "l2 sent message synced height", l2SentMessageSyncedHeight,
"bridge batch deposit synced height", l2BridgeBatchDepositSyncedHeight, "sync start height", l2SyncHeight+1)
log.Info("Start L2 message fetcher", "message synced height", l2SentMessageSyncedHeight, "sync start height", l2SyncHeight+1)
tick := time.NewTicker(time.Duration(c.cfg.BlockTime) * time.Second)
go func() {
@@ -146,11 +141,6 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
return
}
if updateErr := c.eventUpdateLogic.UpdateL2BridgeBatchDepositEvent(c.ctx, l2FetcherResult.BridgeBatchDepositMessage); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}
c.updateL2SyncHeight(to, lastBlockHash)
c.l2MessageFetcherRunningTotal.Inc()
}
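Both fetchers share the polling shape visible in these hunks: compute a safe start height, then fetch on a BlockTime ticker inside a goroutine. A minimal sketch of that loop, assuming a context is used for shutdown; the function and parameter names are illustrative.

package main

import (
    "context"
    "fmt"
    "time"
)

// fetchLoop mirrors the fetcher pattern above: a ticker fires every
// blockTime seconds and the fetch callback runs until the context ends.
func fetchLoop(ctx context.Context, blockTime int64, fetch func()) {
    tick := time.NewTicker(time.Duration(blockTime) * time.Second)
    defer tick.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-tick.C:
            fetch()
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    fetchLoop(ctx, 1, func() { fmt.Println("fetch one block range") })
}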

View File

@@ -11,16 +11,14 @@ import (
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
// EventUpdateLogic the logic of insert/update the database
type EventUpdateLogic struct {
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
bridgeBatchDepositEventOrm *orm.BridgeBatchDepositEvent
db *gorm.DB
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight prometheus.Gauge
eventUpdateLogicL2MessageNonceUpdateHeight prometheus.Gauge
@@ -29,10 +27,9 @@ type EventUpdateLogic struct {
// NewEventUpdateLogic creates a EventUpdateLogic instance
func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
b := &EventUpdateLogic{
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
bridgeBatchDepositEventOrm: orm.NewBridgeBatchDepositEvent(db),
db: db,
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
}
if !isL1 {
@@ -51,42 +48,30 @@ func NewEventUpdateLogic(db *gorm.DB, isL1 bool) *EventUpdateLogic {
}
// GetL1SyncHeight gets the l1 sync height from db
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, uint64, error) {
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL1SentMessage)
func (b *EventUpdateLogic) GetL1SyncHeight(ctx context.Context) (uint64, uint64, error) {
messageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL1SentMessage)
if err != nil {
log.Error("failed to get L1 cross message synced height", "error", err)
return 0, 0, 0, err
return 0, 0, err
}
batchSyncedHeight, err := b.batchEventOrm.GetBatchEventSyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get L1 batch event synced height", "error", err)
return 0, 0, 0, err
return 0, 0, err
}
bridgeBatchDepositSyncedHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL1SyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get l1 bridge batch deposit synced height", "error", err)
return 0, 0, 0, err
}
return messageSyncedHeight, batchSyncedHeight, bridgeBatchDepositSyncedHeight, nil
return messageSyncedHeight, batchSyncedHeight, nil
}
// GetL2MessageSyncedHeightInDB gets L2 messages synced height
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, uint64, error) {
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, btypes.MessageTypeL2SentMessage)
func (b *EventUpdateLogic) GetL2MessageSyncedHeightInDB(ctx context.Context) (uint64, error) {
l2SentMessageSyncedHeight, err := b.crossMessageOrm.GetMessageSyncedHeightInDB(ctx, orm.MessageTypeL2SentMessage)
if err != nil {
log.Error("failed to get L2 cross message processed height", "err", err)
return 0, 0, err
return 0, err
}
l2BridgeBatchDepositSyncHeight, err := b.bridgeBatchDepositEventOrm.GetMessageL2SyncedHeightInDB(ctx)
if err != nil {
log.Error("failed to get bridge batch deposit processed height", "err", err)
return 0, 0, err
}
return l2SentMessageSyncedHeight, l2BridgeBatchDepositSyncHeight, nil
return l2SentMessageSyncedHeight, nil
}
// L1InsertOrUpdate inserts or updates l1 messages
@@ -115,12 +100,6 @@ func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult
log.Error("failed to insert failed L1 gateway transactions", "err", err)
return err
}
if err := b.bridgeBatchDepositEventOrm.InsertOrUpdateL1BridgeBatchDepositEvent(ctx, l1FetcherResult.BridgeBatchDepositEvents); err != nil {
log.Error("failed to insert L1 bridge batch deposit transactions", "err", err)
return err
}
return nil
}
@@ -160,7 +139,7 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
for i, message := range l2WithdrawMessages {
message.MerkleProof = proofs[i]
message.RollupStatus = int(btypes.RollupStatusTypeFinalized)
message.RollupStatus = int(orm.RollupStatusTypeFinalized)
message.BatchIndex = batchIndex
}
@@ -196,30 +175,6 @@ func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, heig
return nil
}
// UpdateL2BridgeBatchDepositEvent update l2 bridge batch deposit status
func (b *EventUpdateLogic) UpdateL2BridgeBatchDepositEvent(ctx context.Context, l2BatchDistributes []*orm.BridgeBatchDepositEvent) error {
distributeFailedMap := make(map[uint64][]string)
for _, l2BatchDistribute := range l2BatchDistributes {
if btypes.TxStatusType(l2BatchDistribute.TxStatus) == btypes.TxStatusBridgeBatchDistributeFailed {
distributeFailedMap[l2BatchDistribute.BatchIndex] = append(distributeFailedMap[l2BatchDistribute.BatchIndex], l2BatchDistribute.Sender)
}
if err := b.bridgeBatchDepositEventOrm.UpdateBatchEventStatus(ctx, l2BatchDistribute); err != nil {
log.Error("failed to update L1 bridge batch distribute event", "batchIndex", l2BatchDistribute.BatchIndex, "err", err)
return err
}
}
for batchIndex, distributeFailedSenders := range distributeFailedMap {
if err := b.bridgeBatchDepositEventOrm.UpdateDistributeFailedStatus(ctx, batchIndex, distributeFailedSenders); err != nil {
log.Error("failed to update L1 bridge batch distribute failed event", "batchIndex", batchIndex, "failed senders", distributeFailedSenders, "err", err)
return err
}
}
return nil
}
// L2InsertOrUpdate inserts or updates L2 messages
func (b *EventUpdateLogic) L2InsertOrUpdate(ctx context.Context, l2FetcherResult *L2FilterResult) error {
if err := b.crossMessageOrm.InsertOrUpdateL2Messages(ctx, l2FetcherResult.WithdrawMessages); err != nil {

View File

@@ -16,7 +16,6 @@ import (
"scroll-tech/bridge-history-api/internal/orm"
"scroll-tech/bridge-history-api/internal/types"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -36,23 +35,20 @@ const (
// HistoryLogic services.
type HistoryLogic struct {
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
bridgeBatchDepositOrm *orm.BridgeBatchDepositEvent
redis *redis.Client
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
crossMessageOrm *orm.CrossMessage
batchEventOrm *orm.BatchEvent
redis *redis.Client
singleFlight singleflight.Group
cacheMetrics *cacheMetrics
}
// NewHistoryLogic returns bridge history services.
func NewHistoryLogic(db *gorm.DB, redis *redis.Client) *HistoryLogic {
logic := &HistoryLogic{
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
bridgeBatchDepositOrm: orm.NewBridgeBatchDepositEvent(db),
redis: redis,
cacheMetrics: initCacheMetrics(),
crossMessageOrm: orm.NewCrossMessage(db),
batchEventOrm: orm.NewBatchEvent(db),
redis: redis,
cacheMetrics: initCacheMetrics(),
}
return logic
}
@@ -76,28 +72,25 @@ func (h *HistoryLogic) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, a
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetL2UnclaimedWithdrawalsByAddress(ctx, address)
if err != nil {
return nil, err
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
return txHistoryInfos, nil
return messages, nil
})
if err != nil {
log.Error("failed to get L2 claimable withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetL2WithdrawalsByAddress gets all withdrawal txs under given address.
@@ -119,28 +112,25 @@ func (h *HistoryLogic) GetL2WithdrawalsByAddress(ctx context.Context, address st
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetL2WithdrawalsByAddress(ctx, address)
if err != nil {
return nil, err
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
return txHistoryInfos, nil
return messages, nil
})
if err != nil {
log.Error("failed to get L2 withdrawals by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetTxsByAddress gets tx infos under given address.
@@ -162,36 +152,25 @@ func (h *HistoryLogic) GetTxsByAddress(ctx context.Context, address string, page
log.Info("cache miss", "cache key", cacheKey)
result, err, _ := h.singleFlight.Do(cacheKey, func() (interface{}, error) {
var txHistoryInfos []*types.TxHistoryInfo
crossMessages, getErr := h.crossMessageOrm.GetTxsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
var messages []*orm.CrossMessage
messages, err = h.crossMessageOrm.GetTxsByAddress(ctx, address)
if err != nil {
return nil, err
}
for _, message := range crossMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromCrossMessage(message))
}
batchDepositMessages, getErr := h.bridgeBatchDepositOrm.GetTxsByAddress(ctx, address)
if getErr != nil {
return nil, getErr
}
for _, message := range batchDepositMessages {
txHistoryInfos = append(txHistoryInfos, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
}
return txHistoryInfos, nil
return messages, nil
})
if err != nil {
log.Error("failed to get txs by address", "address", address, "error", err)
return nil, 0, err
}
txHistoryInfos, ok := result.([]*types.TxHistoryInfo)
messages, ok := result.([]*orm.CrossMessage)
if !ok {
log.Error("unexpected type", "expected", "[]*types.TxHistoryInfo", "got", reflect.TypeOf(result), "address", address)
return nil, 0, errors.New("unexpected error")
}
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, txHistoryInfos, page, pageSize)
return h.processAndCacheTxHistoryInfo(ctx, cacheKey, messages, page, pageSize)
}
// GetTxsByHashes gets tx infos under given tx hashes.
@@ -239,24 +218,15 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
}
if len(uncachedHashes) > 0 {
messages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
var txHistories []*types.TxHistoryInfo
crossMessages, err := h.crossMessageOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get cross messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
for _, message := range crossMessages {
txHistories = append(txHistories, getTxHistoryInfoFromCrossMessage(message))
}
batchDepositMessages, err := h.bridgeBatchDepositOrm.GetMessagesByTxHashes(ctx, uncachedHashes)
if err != nil {
log.Error("failed to get batch deposit messages by tx hashes", "hashes", uncachedHashes)
return nil, err
}
for _, message := range batchDepositMessages {
txHistories = append(txHistories, getTxHistoryInfoFromBridgeBatchDepositMessage(message))
for _, message := range messages {
txHistories = append(txHistories, getTxHistoryInfo(message))
}
resultMap := make(map[string]*types.TxHistoryInfo)
@@ -290,19 +260,19 @@ func (h *HistoryLogic) GetTxsByHashes(ctx context.Context, txHashes []string) ([
return results, nil
}
func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistoryInfo {
func getTxHistoryInfo(message *orm.CrossMessage) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
MessageHash: message.MessageHash,
TokenType: btypes.TokenType(message.TokenType),
TokenType: orm.TokenType(message.TokenType),
TokenIDs: utils.ConvertStringToStringArray(message.TokenIDs),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmounts),
L1TokenAddress: message.L1TokenAddress,
L2TokenAddress: message.L2TokenAddress,
MessageType: btypes.MessageType(message.MessageType),
TxStatus: btypes.TxStatusType(message.TxStatus),
MessageType: orm.MessageType(message.MessageType),
TxStatus: orm.TxStatusType(message.TxStatus),
BlockTimestamp: message.BlockTimestamp,
}
if txHistory.MessageType == btypes.MessageTypeL1SentMessage {
if txHistory.MessageType == orm.MessageTypeL1SentMessage {
txHistory.Hash = message.L1TxHash
txHistory.ReplayTxHash = message.L1ReplayTxHash
txHistory.RefundTxHash = message.L1RefundTxHash
@@ -318,7 +288,7 @@ func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistor
Hash: message.L1TxHash,
BlockNumber: message.L1BlockNumber,
}
if btypes.RollupStatusType(message.RollupStatus) == btypes.RollupStatusTypeFinalized {
if orm.RollupStatusType(message.RollupStatus) == orm.RollupStatusTypeFinalized {
txHistory.ClaimInfo = &types.ClaimInfo{
From: message.MessageFrom,
To: message.MessageTo,
@@ -336,28 +306,6 @@ func getTxHistoryInfoFromCrossMessage(message *orm.CrossMessage) *types.TxHistor
return txHistory
}
func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepositEvent) *types.TxHistoryInfo {
txHistory := &types.TxHistoryInfo{
Hash: message.L1TxHash,
TokenType: btypes.TokenType(message.TokenType),
TokenAmounts: utils.ConvertStringToStringArray(message.TokenAmount),
BlockNumber: message.L1BlockNumber,
MessageType: btypes.MessageTypeL1BatchDeposit,
TxStatus: btypes.TxStatusType(message.TxStatus),
CounterpartChainTx: &types.CounterpartChainTx{
Hash: message.L2TxHash,
BlockNumber: message.L2BlockNumber,
},
BlockTimestamp: message.BlockTimestamp,
BatchDepositFee: message.Fee,
}
if txHistory.TokenType != btypes.TokenTypeETH {
txHistory.L1TokenAddress = message.L1TokenAddress
txHistory.L2TokenAddress = message.L2TokenAddress
}
return txHistory
}
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
start := int64((pageNum - 1) * pageSize)
end := start + int64(pageSize) - 1
@@ -372,7 +320,7 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
return nil, 0, false, nil
}
values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
values, err := h.redis.ZRange(ctx, cacheKey, start, end).Result()
if err != nil {
log.Error("failed to get zrange result", "error", err)
return nil, 0, false, err
@@ -408,13 +356,13 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
}
} else {
// The transactions are sorted, thus we set the score as their indices.
for _, tx := range txs {
for i, tx := range txs {
txBytes, err := json.Marshal(tx)
if err != nil {
log.Error("failed to marshal transaction to json", "error", err)
return err
}
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(tx.BlockTimestamp), Member: txBytes}).Err(); err != nil {
if err := pipe.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(i), Member: txBytes}).Err(); err != nil {
log.Error("failed to add transaction to sorted set", "error", err)
return err
}
@@ -433,7 +381,12 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
return nil
}
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, txHistories []*types.TxHistoryInfo, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKey string, messages []*orm.CrossMessage, page, pageSize uint64) ([]*types.TxHistoryInfo, uint64, error) {
var txHistories []*types.TxHistoryInfo
for _, message := range messages {
txHistories = append(txHistories, getTxHistoryInfo(message))
}
err := h.cacheTxsInfo(ctx, cacheKey, txHistories)
if err != nil {
log.Error("failed to cache txs info", "key", cacheKey, "err", err)

View File

@@ -13,7 +13,6 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -31,60 +30,8 @@ func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1Ev
}
}
// ParseL1CrossChainEventLogs parse l1 cross chain event logs
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
l1CrossChainDepositMessages, l1CrossChainRelayedMessages, err := e.ParseL1SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
l1BridgeBatchDepositMessages, err := e.ParseL1BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
return l1CrossChainDepositMessages, l1CrossChainRelayedMessages, l1BridgeBatchDepositMessages, nil
}
// ParseL1BridgeBatchDepositCrossChainEventLogs parse L1 watched batch bridge cross chain events.
func (e *L1EventParser) ParseL1BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
var l1BridgeBatchDepositMessages []*orm.BridgeBatchDepositEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1BridgeBatchDepositSig:
event := backendabi.L1BatchBridgeGatewayDeposit{}
if err := utils.UnpackLog(backendabi.L1BatchBridgeGatewayABI, &event, "Deposit", vlog); err != nil {
log.Error("Failed to unpack batch bridge gateway deposit event", "err", err)
return nil, err
}
var tokenType btypes.TokenType
if event.Token == common.HexToAddress("0") {
tokenType = btypes.TokenTypeETH
} else {
tokenType = btypes.TokenTypeERC20
}
l1BridgeBatchDepositMessages = append(l1BridgeBatchDepositMessages, &orm.BridgeBatchDepositEvent{
TokenType: int(tokenType),
Sender: event.Sender.String(),
BatchIndex: event.BatchIndex.Uint64(),
TokenAmount: event.Amount.String(),
Fee: event.Fee.String(),
L1TokenAddress: event.Token.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDeposit),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L1LogIndex: vlog.Index,
})
}
}
return l1BridgeBatchDepositMessages, nil
}
// ParseL1SingleCrossChainEventLogs parses L1 watched single cross chain events.
func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
// ParseL1CrossChainEventLogs parses L1 watched cross chain events.
func (e *L1EventParser) ParseL1CrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l1DepositMessages []*orm.CrossMessage
var l1RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
@@ -98,7 +45,7 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeETH)
lastMessage.TokenType = int(orm.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
@@ -110,7 +57,7 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC20)
lastMessage.TokenType = int(orm.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
@@ -123,7 +70,7 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -136,7 +83,7 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -149,7 +96,7 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -163,7 +110,7 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -183,12 +130,12 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
L1BlockNumber: vlog.BlockNumber,
Sender: from,
Receiver: event.Target.String(),
TokenType: int(btypes.TokenTypeETH),
TokenType: int(orm.TokenTypeETH),
L1TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageType: int(btypes.MessageTypeL1SentMessage),
TxStatus: int(btypes.TxStatusTypeSent),
MessageType: int(orm.MessageTypeL1SentMessage),
TxStatus: int(orm.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
})
@@ -202,8 +149,8 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeRelayed),
MessageType: int(btypes.MessageTypeL2SentMessage),
TxStatus: int(orm.TxStatusTypeRelayed),
MessageType: int(orm.MessageTypeL2SentMessage),
})
case backendabi.L1FailedRelayedMessageEventSig:
event := backendabi.L1FailedRelayedMessageEvent{}
@@ -215,8 +162,8 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
MessageHash: event.MessageHash.String(),
L1BlockNumber: vlog.BlockNumber,
L1TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
MessageType: int(btypes.MessageTypeL2SentMessage),
TxStatus: int(orm.TxStatusTypeFailedRelayed),
MessageType: int(orm.MessageTypeL2SentMessage),
})
}
}
@@ -245,7 +192,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeCommitted),
BatchStatus: int(orm.BatchStatusTypeCommitted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
StartBlockNumber: startBlock,
@@ -259,7 +206,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeReverted),
BatchStatus: int(orm.BatchStatusTypeReverted),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
@@ -271,7 +218,7 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, err
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeFinalized),
BatchStatus: int(orm.BatchStatusTypeFinalized),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
@@ -301,7 +248,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
// If the message hash is not found in the map, it's not a replayMessage or enforced tx (omitted); add it to the events.
if _, exists := messageHashes[messageHash]; !exists {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeQueueTransaction,
EventType: orm.MessageQueueEventTypeQueueTransaction,
QueueIndex: event.QueueIndex,
MessageHash: messageHash,
TxHash: vlog.TxHash,
@@ -316,7 +263,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
skippedIndices := utils.GetSkippedQueueIndices(event.StartIndex.Uint64(), event.SkippedBitmap)
for _, index := range skippedIndices {
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeDequeueTransaction,
EventType: orm.MessageQueueEventTypeDequeueTransaction,
QueueIndex: index,
})
}
@@ -327,7 +274,7 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeDropTransaction,
EventType: orm.MessageQueueEventTypeDropTransaction,
QueueIndex: event.Index.Uint64(),
TxHash: vlog.TxHash,
})

View File

@@ -16,7 +16,6 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -26,12 +25,11 @@ const L1ReorgSafeDepth = 64
// L1FilterResult L1 fetcher result
type L1FilterResult struct {
DepositMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage
BatchEvents []*orm.BatchEvent
MessageQueueEvents []*orm.MessageQueueEvent
RevertedTxs []*orm.CrossMessage
BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
DepositMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage
BatchEvents []*orm.BatchEvent
MessageQueueEvents []*orm.MessageQueueEvent
RevertedTxs []*orm.CrossMessage
}
// L1FetcherLogic the L1 fetcher logic
@@ -84,7 +82,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional gateways.
// Optional erc20 gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
@@ -100,11 +98,6 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L1FetcherLogic{
@@ -190,12 +183,12 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
L1TxHash: tx.Hash().String(),
MessageType: int(btypes.MessageTypeL1SentMessage),
MessageType: int(orm.MessageTypeL1SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L1BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
})
}
}
@@ -210,7 +203,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 14)
query.Topics[0] = make([]common.Hash, 13)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1DepositERC721Sig
@@ -224,7 +217,6 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
query.Topics[0][13] = backendabi.L1BridgeBatchDepositSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
@@ -260,7 +252,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l1DepositMessages, l1RelayedMessages, l1BridgeBatchDepositMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
l1DepositMessages, l1RelayedMessages, err := f.parser.ParseL1CrossChainEventLogs(ctx, eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L1 cross chain event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
@@ -279,12 +271,11 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
}
res := L1FilterResult{
DepositMessages: l1DepositMessages,
RelayedMessages: l1RelayedMessages,
BatchEvents: l1BatchEvents,
MessageQueueEvents: l1MessageQueueEvents,
RevertedTxs: l1RevertedTxs,
BridgeBatchDepositEvents: l1BridgeBatchDepositMessages,
DepositMessages: l1DepositMessages,
RelayedMessages: l1RelayedMessages,
BatchEvents: l1BatchEvents,
MessageQueueEvents: l1MessageQueueEvents,
RevertedTxs: l1RevertedTxs,
}
f.updateMetrics(res)
@@ -296,23 +287,23 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_gateway_router_transaction").Add(float64(len(res.RevertedTxs)))
for _, depositMessage := range res.DepositMessages {
switch btypes.TokenType(depositMessage.TokenType) {
case btypes.TokenTypeETH:
switch orm.TokenType(depositMessage.TokenType) {
case orm.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_eth").Add(1)
case btypes.TokenTypeERC20:
case orm.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc20").Add(1)
case btypes.TokenTypeERC721:
case orm.TokenTypeERC721:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc721").Add(1)
case btypes.TokenTypeERC1155:
case orm.TokenTypeERC1155:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_deposit_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch btypes.TxStatusType(relayedMessage.TxStatus) {
case btypes.TxStatusTypeRelayed:
switch orm.TxStatusType(relayedMessage.TxStatus) {
case orm.TxStatusTypeRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_relayed_message").Add(1)
case btypes.TxStatusTypeFailedRelayed:
case orm.TxStatusTypeFailedRelayed:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_failed_relayed_message").Add(1)
}
// Have not tracked L1 relayed message reverted transaction yet.
@@ -321,33 +312,24 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
}
for _, batchEvent := range res.BatchEvents {
switch btypes.BatchStatusType(batchEvent.BatchStatus) {
case btypes.BatchStatusTypeCommitted:
switch orm.BatchStatusType(batchEvent.BatchStatus) {
case orm.BatchStatusTypeCommitted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_commit_batch_event").Add(1)
case btypes.BatchStatusTypeReverted:
case orm.BatchStatusTypeReverted:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_revert_batch_event").Add(1)
case btypes.BatchStatusTypeFinalized:
case orm.BatchStatusTypeFinalized:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_finalize_batch_event").Add(1)
}
}
for _, messageQueueEvent := range res.MessageQueueEvents {
switch messageQueueEvent.EventType {
case btypes.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
case orm.MessageQueueEventTypeQueueTransaction: // sendMessage is filtered out, only leaving replayMessage or appendEnforcedTransaction.
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_replay_message_or_enforced_transaction").Add(1)
case btypes.MessageQueueEventTypeDequeueTransaction:
case orm.MessageQueueEventTypeDequeueTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
case btypes.MessageQueueEventTypeDropTransaction:
case orm.MessageQueueEventTypeDropTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
}
}
for _, bridgeBatchDepositEvent := range res.BridgeBatchDepositEvents {
switch btypes.TokenType(bridgeBatchDepositEvent.TokenType) {
case btypes.TokenTypeETH:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_eth").Add(1)
case btypes.TokenTypeERC20:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_bridge_batch_deposit_erc20").Add(1)
}
}
}
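The l1FetcherLogicFetchedTotal counter incremented above follows the standard Prometheus labeled-counter pattern. A minimal sketch, assuming the prometheus/client_golang library pinned in go.mod (metric and label names here are illustrative):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// fetchedTotal counts fetched events, partitioned by a single label,
// e.g. "L1_deposit_eth" or "L1_relayed_message".
var fetchedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "event_fetcher_fetched_total",
	Help: "Total number of fetched events, by event type.",
}, []string{"type"})

func recordEthDeposit() {
	fetchedTotal.WithLabelValues("L1_deposit_eth").Add(1)
}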


@@ -3,7 +3,6 @@ package logic
import (
"context"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
@@ -12,7 +11,6 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -30,72 +28,8 @@ func NewL2EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L2Ev
}
}
// ParseL2EventLogs parses L2 watchedevents
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, []*orm.BridgeBatchDepositEvent, error) {
l2WithdrawMessages, l2RelayedMessages, err := e.ParseL2SingleCrossChainEventLogs(ctx, logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
l2BridgeBatchDepositMessages, err := e.ParseL2BridgeBatchDepositCrossChainEventLogs(logs, blockTimestampsMap)
if err != nil {
return nil, nil, nil, err
}
return l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, nil
}
// ParseL2BridgeBatchDepositCrossChainEventLogs parses L2 watched bridge batch deposit events
func (e *L2EventParser) ParseL2BridgeBatchDepositCrossChainEventLogs(logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.BridgeBatchDepositEvent, error) {
var l2BridgeBatchDepositEvents []*orm.BridgeBatchDepositEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2BridgeBatchDistributeSig:
event := backendabi.L2BatchBridgeGatewayBatchDistribute{}
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "BatchDistribute", vlog)
if err != nil {
log.Error("Failed to unpack BatchDistribute event", "err", err)
return nil, err
}
var tokenType btypes.TokenType
if event.L1Token == common.HexToAddress("0") {
tokenType = btypes.TokenTypeETH
} else {
tokenType = btypes.TokenTypeERC20
}
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
TokenType: int(tokenType),
BatchIndex: event.BatchIndex.Uint64(),
L2TokenAddress: event.L2Token.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDistribute),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
})
case backendabi.L2BridgeBatchDistributeFailedSig:
event := backendabi.L2BatchBridgeGatewayDistributeFailed{}
err := utils.UnpackLog(backendabi.L2BatchBridgeGatewayABI, &event, "DistributeFailed", vlog)
if err != nil {
log.Error("Failed to unpack DistributeFailed event", "err", err)
return nil, err
}
l2BridgeBatchDepositEvents = append(l2BridgeBatchDepositEvents, &orm.BridgeBatchDepositEvent{
BatchIndex: event.BatchIndex.Uint64(),
L2TokenAddress: event.L2Token.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusBridgeBatchDistributeFailed),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
Sender: event.Receiver.String(),
})
}
}
return l2BridgeBatchDepositEvents, nil
}
// ParseL2SingleCrossChainEventLogs parses L2 watched events
func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
// ParseL2EventLogs parses L2 watched events
func (e *L2EventParser) ParseL2EventLogs(ctx context.Context, logs []types.Log, blockTimestampsMap map[uint64]uint64) ([]*orm.CrossMessage, []*orm.CrossMessage, error) {
var l2WithdrawMessages []*orm.CrossMessage
var l2RelayedMessages []*orm.CrossMessage
for _, vlog := range logs {
@@ -110,7 +44,7 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeETH)
lastMessage.TokenType = int(orm.TokenTypeETH)
lastMessage.TokenAmounts = event.Amount.String()
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
@@ -122,7 +56,7 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC20)
lastMessage.TokenType = int(orm.TokenTypeERC20)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenAmounts = event.Amount.String()
@@ -136,7 +70,7 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -150,7 +84,7 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC721)
lastMessage.TokenType = int(orm.TokenTypeERC721)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -164,7 +98,7 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = event.TokenID.String()
@@ -179,7 +113,7 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage := l2WithdrawMessages[len(l2WithdrawMessages)-1]
lastMessage.Sender = event.From.String()
lastMessage.Receiver = event.To.String()
lastMessage.TokenType = int(btypes.TokenTypeERC1155)
lastMessage.TokenType = int(orm.TokenTypeERC1155)
lastMessage.L1TokenAddress = event.L1Token.String()
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
@@ -200,7 +134,7 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
MessageHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message).String(),
Sender: from,
Receiver: event.Target.String(),
TokenType: int(btypes.TokenTypeETH),
TokenType: int(orm.TokenTypeETH),
L2TxHash: vlog.TxHash.String(),
TokenAmounts: event.Value.String(),
MessageFrom: event.Sender.String(),
@@ -208,8 +142,8 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
MessageValue: event.Value.String(),
MessageNonce: event.MessageNonce.Uint64(),
MessageData: hexutil.Encode(event.Message),
MessageType: int(btypes.MessageTypeL2SentMessage),
TxStatus: int(btypes.TxStatusTypeSent),
MessageType: int(orm.MessageTypeL2SentMessage),
TxStatus: int(orm.TxStatusTypeSent),
BlockTimestamp: blockTimestampsMap[vlog.BlockNumber],
L2BlockNumber: vlog.BlockNumber,
})
@@ -224,8 +158,8 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeRelayed),
MessageType: int(btypes.MessageTypeL1SentMessage),
TxStatus: int(orm.TxStatusTypeRelayed),
MessageType: int(orm.MessageTypeL1SentMessage),
})
case backendabi.L2FailedRelayedMessageEventSig:
event := backendabi.L2RelayedMessageEvent{}
@@ -238,8 +172,8 @@ func (e *L2EventParser) ParseL2SingleCrossChainEventLogs(ctx context.Context, lo
MessageHash: event.MessageHash.String(),
L2BlockNumber: vlog.BlockNumber,
L2TxHash: vlog.TxHash.String(),
TxStatus: int(btypes.TxStatusTypeFailedRelayed),
MessageType: int(btypes.MessageTypeL1SentMessage),
TxStatus: int(orm.TxStatusTypeFailedRelayed),
MessageType: int(orm.MessageTypeL1SentMessage),
})
}
}


@@ -17,7 +17,6 @@ import (
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
"scroll-tech/bridge-history-api/internal/orm"
btypes "scroll-tech/bridge-history-api/internal/types"
"scroll-tech/bridge-history-api/internal/utils"
)
@@ -27,10 +26,9 @@ const L2ReorgSafeDepth = 256
// L2FilterResult the L2 filter result
type L2FilterResult struct {
WithdrawMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
BridgeBatchDepositMessage []*orm.BridgeBatchDepositEvent
WithdrawMessages []*orm.CrossMessage
RelayedMessages []*orm.CrossMessage // relayed, failed relayed, relay tx reverted.
OtherRevertedTxs []*orm.CrossMessage // reverted txs except relay tx reverted.
}
// L2FetcherLogic the L2 fetcher logic
@@ -79,7 +77,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.GatewayRouterAddr),
}
// Optional gateways.
// Optional erc20 gateways.
if common.HexToAddress(cfg.USDCGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
@@ -95,11 +93,6 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
if common.HexToAddress(cfg.BatchBridgeGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L2FetcherLogic{
@@ -171,9 +164,9 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
l2RevertedRelayedMessageTxs = append(l2RevertedRelayedMessageTxs, &orm.CrossMessage{
MessageHash: common.BytesToHash(crypto.Keccak256(tx.AsL1MessageTx().Data)).String(),
L2TxHash: tx.Hash().String(),
TxStatus: int(btypes.TxStatusTypeRelayTxReverted),
TxStatus: int(orm.TxStatusTypeRelayTxReverted),
L2BlockNumber: receipt.BlockNumber.Uint64(),
MessageType: int(btypes.MessageTypeL1SentMessage),
MessageType: int(orm.MessageTypeL1SentMessage),
})
}
continue
@@ -201,12 +194,12 @@ func (f *L2FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
l2RevertedUserTxs = append(l2RevertedUserTxs, &orm.CrossMessage{
L2TxHash: tx.Hash().String(),
MessageType: int(btypes.MessageTypeL2SentMessage),
MessageType: int(orm.MessageTypeL2SentMessage),
Sender: sender.String(),
Receiver: (*tx.To()).String(),
L2BlockNumber: receipt.BlockNumber.Uint64(),
BlockTimestamp: block.Time(),
TxStatus: int(btypes.TxStatusTypeSentTxReverted),
TxStatus: int(orm.TxStatusTypeSentTxReverted),
})
}
}
@@ -221,7 +214,7 @@ func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]
Addresses: f.addressList,
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 9)
query.Topics[0] = make([]common.Hash, 7)
query.Topics[0][0] = backendabi.L2WithdrawETHSig
query.Topics[0][1] = backendabi.L2WithdrawERC20Sig
query.Topics[0][2] = backendabi.L2WithdrawERC721Sig
@@ -229,8 +222,6 @@ func (f *L2FetcherLogic) l2FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][4] = backendabi.L2SentMessageEventSig
query.Topics[0][5] = backendabi.L2RelayedMessageEventSig
query.Topics[0][6] = backendabi.L2FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L2BridgeBatchDistributeSig
query.Topics[0][8] = backendabi.L2BridgeBatchDistributeFailedSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
@@ -266,17 +257,16 @@ func (f *L2FetcherLogic) L2Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l2WithdrawMessages, l2RelayedMessages, l2BridgeBatchDepositMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
l2WithdrawMessages, l2RelayedMessages, err := f.parser.ParseL2EventLogs(ctx, eventLogs, blockTimestampsMap)
if err != nil {
log.Error("failed to parse L2 event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
}
res := L2FilterResult{
WithdrawMessages: l2WithdrawMessages,
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
OtherRevertedTxs: revertedUserTxs,
BridgeBatchDepositMessage: l2BridgeBatchDepositMessages,
WithdrawMessages: l2WithdrawMessages,
RelayedMessages: append(l2RelayedMessages, revertedRelayMsgs...),
OtherRevertedTxs: revertedUserTxs,
}
f.updateMetrics(res)
@@ -288,37 +278,28 @@ func (f *L2FetcherLogic) updateMetrics(res L2FilterResult) {
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_gateway_router_transaction").Add(float64(len(res.OtherRevertedTxs)))
for _, withdrawMessage := range res.WithdrawMessages {
switch btypes.TokenType(withdrawMessage.TokenType) {
case btypes.TokenTypeETH:
switch orm.TokenType(withdrawMessage.TokenType) {
case orm.TokenTypeETH:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_eth").Add(1)
case btypes.TokenTypeERC20:
case orm.TokenTypeERC20:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc20").Add(1)
case btypes.TokenTypeERC721:
case orm.TokenTypeERC721:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc721").Add(1)
case btypes.TokenTypeERC1155:
case orm.TokenTypeERC1155:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_withdraw_erc1155").Add(1)
}
}
for _, relayedMessage := range res.RelayedMessages {
switch btypes.TxStatusType(relayedMessage.TxStatus) {
case btypes.TxStatusTypeRelayed:
switch orm.TxStatusType(relayedMessage.TxStatus) {
case orm.TxStatusTypeRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_relayed_message").Add(1)
case btypes.TxStatusTypeFailedRelayed:
case orm.TxStatusTypeFailedRelayed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_failed_relayed_message").Add(1)
case btypes.TxStatusTypeRelayTxReverted:
case orm.TxStatusTypeRelayTxReverted:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_reverted_relayed_message_transaction").Add(1)
}
}
for _, bridgeBatchDepositMessage := range res.BridgeBatchDepositMessage {
switch btypes.TxStatusType(bridgeBatchDepositMessage.TxStatus) {
case btypes.TxStatusBridgeBatchDistribute:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_message").Add(1)
case btypes.TxStatusBridgeBatchDistributeFailed:
f.l2FetcherLogicFetchedTotal.WithLabelValues("L2_bridge_batch_distribute_failed_message").Add(1)
}
}
}
func isTransactionToGateway(tx *types.Transaction, gatewayList []common.Address) bool {


@@ -7,8 +7,26 @@ import (
"gorm.io/gorm"
"gorm.io/gorm/clause"
btypes "scroll-tech/bridge-history-api/internal/types"
)
// BatchStatusType represents the type of batch status.
type BatchStatusType int
// Constants for BatchStatusType.
const (
BatchStatusTypeUnknown BatchStatusType = iota
BatchStatusTypeCommitted
BatchStatusTypeReverted
BatchStatusTypeFinalized
)
// UpdateStatusType represents whether the batch info has been updated in the message table.
type UpdateStatusType int
// Constants for UpdateStatusType.
const (
UpdateStatusTypeUnupdated UpdateStatusType = iota
UpdateStatusTypeUpdated
)
// BatchEvent represents a batch event.
@@ -59,8 +77,8 @@ func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, block
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("end_block_number <= ?", blockHeight)
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
db = db.Where("batch_status = ?", BatchStatusTypeFinalized)
db = db.Where("update_status = ?", UpdateStatusTypeUnupdated)
db = db.Order("batch_index asc")
if err := db.Find(&batches).Error; err != nil {
if err == gorm.ErrRecordNotFound {
@@ -78,8 +96,8 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
db = db.WithContext(ctx)
db = db.Model(&BatchEvent{})
updateFields := make(map[string]interface{})
switch btypes.BatchStatusType(l1BatchEvent.BatchStatus) {
case btypes.BatchStatusTypeCommitted:
switch BatchStatusType(l1BatchEvent.BatchStatus) {
case BatchStatusTypeCommitted:
// Use the clause to either insert or ignore on conflict
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "batch_hash"}},
@@ -88,17 +106,17 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
if err := db.Create(l1BatchEvent).Error; err != nil {
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
}
case btypes.BatchStatusTypeFinalized:
case BatchStatusTypeFinalized:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
updateFields["batch_status"] = BatchStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
case btypes.BatchStatusTypeReverted:
case BatchStatusTypeReverted:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeReverted
updateFields["batch_status"] = BatchStatusTypeReverted
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
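The committed case above relies on gorm's insert-or-ignore idiom, which makes re-processing the same batch event idempotent. A minimal sketch, assuming a unique index on batch_hash (illustrative):

// Insert the event, or silently skip it if a row with the same
// batch_hash already exists.
err := db.Clauses(clause.OnConflict{
	Columns:   []clause.Column{{Name: "batch_hash"}},
	DoNothing: true,
}).Create(l1BatchEvent).Error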
@@ -117,7 +135,7 @@ func (c *BatchEvent) UpdateBatchEventStatus(ctx context.Context, batchIndex uint
db = db.Model(&BatchEvent{})
db = db.Where("batch_index = ?", batchIndex)
updateFields := map[string]interface{}{
"update_status": btypes.UpdateStatusTypeUpdated,
"update_status": UpdateStatusTypeUpdated,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event status, batchIndex: %d, error: %w", batchIndex, err)


@@ -1,163 +0,0 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/bridge-history-api/internal/types"
)
// BridgeBatchDepositEvent represents the bridge batch deposit event.
type BridgeBatchDepositEvent struct {
db *gorm.DB `gorm:"column:-"`
ID uint64 `json:"id" gorm:"column:id;primary_key"`
TokenType int `json:"token_type" gorm:"column:token_type"`
Sender string `json:"sender" gorm:"column:sender"`
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
TokenAmount string `json:"token_amount" gorm:"column:token_amount"`
Fee string `json:"fee" gorm:"column:fee"`
L1TokenAddress string `json:"l1_token_address" gorm:"column:l1_token_address"`
L2TokenAddress string `json:"l2_token_address" gorm:"column:l2_token_address"`
L1BlockNumber uint64 `json:"l1_block_number" gorm:"column:l1_block_number"`
L2BlockNumber uint64 `json:"l2_block_number" gorm:"column:l2_block_number"`
L1TxHash string `json:"l1_tx_hash" gorm:"column:l1_tx_hash"`
L1LogIndex uint `json:"l1_log_index" gorm:"column:l1_log_index"`
L2TxHash string `json:"l2_tx_hash" gorm:"column:l2_tx_hash"`
TxStatus int `json:"tx_status" gorm:"column:tx_status"`
BlockTimestamp uint64 `json:"block_timestamp" gorm:"column:block_timestamp"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt *time.Time `json:"deleted_at" gorm:"column:deleted_at"`
}
// TableName returns the table name for the BridgeBatchDepositEvent model.
func (*BridgeBatchDepositEvent) TableName() string {
return "bridge_batch_deposit_event_v2"
}
// NewBridgeBatchDepositEvent returns a new instance of BridgeBatchDepositEvent.
func NewBridgeBatchDepositEvent(db *gorm.DB) *BridgeBatchDepositEvent {
return &BridgeBatchDepositEvent{db: db}
}
// GetTxsByAddress returns the txs by address
func (c *BridgeBatchDepositEvent) GetTxsByAddress(ctx context.Context, sender string) ([]*BridgeBatchDepositEvent, error) {
var messages []*BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get all txs by sender address, sender: %v, error: %w", sender, err)
}
return messages, nil
}
// GetMessagesByTxHashes retrieves all BridgeBatchDepositEvent from the database that match the provided transaction hashes.
func (c *BridgeBatchDepositEvent) GetMessagesByTxHashes(ctx context.Context, txHashes []string) ([]*BridgeBatchDepositEvent, error) {
var messages []*BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("l1_tx_hash in (?) or l2_tx_hash in (?)", txHashes, txHashes)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to GetMessagesByTxHashes by tx hashes, tx hashes: %v, error: %w", txHashes, err)
}
return messages, nil
}
// GetMessageL1SyncedHeightInDB returns the l1 latest bridge batch deposit message height from the database
func (c *BridgeBatchDepositEvent) GetMessageL1SyncedHeightInDB(ctx context.Context) (uint64, error) {
var message BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Order("l1_block_number desc")
err := db.First(&message).Error
if err != nil && errors.Is(gorm.ErrRecordNotFound, err) {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to get l1 latest processed height, error: %w", err)
}
return message.L1BlockNumber, nil
}
// GetMessageL2SyncedHeightInDB returns the l2 latest bridge batch deposit message height from the database
func (c *BridgeBatchDepositEvent) GetMessageL2SyncedHeightInDB(ctx context.Context) (uint64, error) {
var message BridgeBatchDepositEvent
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Order("l2_block_number desc")
err := db.First(&message).Error
if err != nil && errors.Is(gorm.ErrRecordNotFound, err) {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to get l2 latest processed height, error: %w", err)
}
return message.L2BlockNumber, nil
}
// InsertOrUpdateL1BridgeBatchDepositEvent inserts or updates a new L1 BridgeBatchDepositEvent
func (c *BridgeBatchDepositEvent) InsertOrUpdateL1BridgeBatchDepositEvent(ctx context.Context, l1BatchDepositEvents []*BridgeBatchDepositEvent) error {
if len(l1BatchDepositEvents) == 0 {
return nil
}
db := c.db
db = db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "l1_tx_hash"}, {Name: "l1_log_index"}},
DoUpdates: clause.AssignmentColumns([]string{"token_amount", "fee", "l1_block_number", "l1_token_address", "tx_status", "block_timestamp"}),
})
if err := db.Create(l1BatchDepositEvents).Error; err != nil {
return fmt.Errorf("failed to insert message, error: %w", err)
}
return nil
}
// UpdateBatchEventStatus updates the tx_status of BridgeBatchDepositEvent given batch index
func (c *BridgeBatchDepositEvent) UpdateBatchEventStatus(ctx context.Context, distributeMessage *BridgeBatchDepositEvent) error {
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("batch_index = ?", distributeMessage.BatchIndex)
db = db.Where("token_type = ?", distributeMessage.TokenType)
updateFields := map[string]interface{}{
"l2_token_address": distributeMessage.L2TokenAddress,
"l2_block_number": distributeMessage.L2BlockNumber,
"l2_tx_hash": distributeMessage.L2TxHash,
"tx_status": types.TxStatusBridgeBatchDistribute,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to UpdateBatchEventStatus, batchIndex: %d, error: %w", distributeMessage.BatchIndex, err)
}
return nil
}
// UpdateDistributeFailedStatus updates the tx_status of BridgeBatchDepositEvent given batch index and senders
func (c *BridgeBatchDepositEvent) UpdateDistributeFailedStatus(ctx context.Context, batchIndex uint64, senders []string) error {
db := c.db.WithContext(ctx)
db = db.Model(&BridgeBatchDepositEvent{})
db = db.Where("batch_index = ?", batchIndex)
db = db.Where("sender in (?)", senders)
updateFields := map[string]interface{}{
"tx_status": types.TxStatusBridgeBatchDistributeFailed,
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to UpdateDistributeFailedStatus, batchIndex: %d, senders:%v, error: %w", batchIndex, senders, err)
}
return nil
}


@@ -8,15 +8,75 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/bridge-history-api/internal/types"
btypes "scroll-tech/bridge-history-api/internal/types"
)
// TokenType represents the type of token.
type TokenType int
// Constants for TokenType.
const (
TokenTypeUnknown TokenType = iota
TokenTypeETH
TokenTypeERC20
TokenTypeERC721
TokenTypeERC1155
)
// MessageType represents the type of message.
type MessageType int
// Constants for MessageType.
const (
MessageTypeUnknown MessageType = iota
MessageTypeL1SentMessage
MessageTypeL2SentMessage
)
// TxStatusType represents the status of a transaction.
type TxStatusType int
// Constants for TxStatusType.
const (
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
// from a later status (e.g., relayed) back to "sent".
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // Message hash is not tracked, thus it will not be processed again.
TxStatusTypeRelayed // Terminal status.
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeFailedRelayed
// Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
)
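The ordering above matters: TxStatusTypeSent is the default value, while TxStatusTypeRelayed and TxStatusTypeDropped are terminal, so writers must guard updates to keep a late-arriving "sent" record from regressing a terminal status. A minimal sketch of such a guard with gorm (illustrative, not the exact query used below):

// markSkipped flips a message to skipped unless it already reached a
// terminal status.
func markSkipped(db *gorm.DB, messageNonce uint64) error {
	return db.Model(&CrossMessage{}).
		Where("message_nonce = ?", messageNonce).
		Where("tx_status NOT IN ?", []TxStatusType{TxStatusTypeRelayed, TxStatusTypeDropped}).
		Update("tx_status", TxStatusTypeSkipped).Error
}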
// RollupStatusType represents the status of a rollup.
type RollupStatusType int
// Constants for RollupStatusType.
const (
RollupStatusTypeUnknown RollupStatusType = iota
RollupStatusTypeFinalized // only batch finalized status is used.
)
// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int
// Constants for MessageQueueEventType.
const (
MessageQueueEventTypeUnknown MessageQueueEventType = iota
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
)
// MessageQueueEvent struct represents the details of a batch event.
type MessageQueueEvent struct {
EventType btypes.MessageQueueEventType
EventType MessageQueueEventType
QueueIndex uint64
// Track replay tx hash and refund tx hash.
@@ -72,15 +132,15 @@ func NewCrossMessage(db *gorm.DB) *CrossMessage {
}
// GetMessageSyncedHeightInDB returns the latest synced cross message height from the database for a given message type.
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType btypes.MessageType) (uint64, error) {
func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageType MessageType) (uint64, error) {
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", messageType)
switch {
case messageType == btypes.MessageTypeL1SentMessage:
case messageType == MessageTypeL1SentMessage:
db = db.Order("l1_block_number desc")
case messageType == btypes.MessageTypeL2SentMessage:
case messageType == MessageTypeL2SentMessage:
db = db.Order("l2_block_number desc")
}
if err := db.First(&message).Error; err != nil {
@@ -90,9 +150,9 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
}
switch {
case messageType == btypes.MessageTypeL1SentMessage:
case messageType == MessageTypeL1SentMessage:
return message.L1BlockNumber, nil
case messageType == btypes.MessageTypeL2SentMessage:
case messageType == MessageTypeL2SentMessage:
return message.L2BlockNumber, nil
default:
return 0, fmt.Errorf("invalid message type: %v", messageType)
@@ -104,8 +164,8 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
var message CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("rollup_status = ?", RollupStatusTypeFinalized)
db = db.Order("message_nonce desc")
if err := db.First(&message).Error; err != nil {
if err == gorm.ErrRecordNotFound {
@@ -123,8 +183,8 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
db = db.Model(&CrossMessage{})
db = db.Where("l2_block_number >= ?", startBlock)
db = db.Where("l2_block_number <= ?", endBlock)
db = db.Where("tx_status != ?", types.TxStatusTypeSentTxReverted)
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status != ?", TxStatusTypeSentTxReverted)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Order("message_nonce asc")
if err := db.Find(&messages).Error; err != nil {
if err == gorm.ErrRecordNotFound {
@@ -152,8 +212,8 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", TxStatusTypeSent)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
@@ -168,7 +228,7 @@ func (c *CrossMessage) GetL2WithdrawalsByAddress(ctx context.Context, sender str
var messages []*CrossMessage
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
@@ -201,22 +261,22 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Model(&CrossMessage{})
txStatusUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeQueueTransaction:
case MessageQueueEventTypeQueueTransaction:
continue
case btypes.MessageQueueEventTypeDequeueTransaction:
case MessageQueueEventTypeDequeueTransaction:
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSkipped
case btypes.MessageQueueEventTypeDropTransaction:
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeSkipped
case MessageQueueEventTypeDropTransaction:
// do not over-write terminal statuses.
db = db.Where("tx_status != ?", types.TxStatusTypeRelayed)
db = db.Where("tx_status != ?", types.TxStatusTypeDropped)
db = db.Where("tx_status != ?", TxStatusTypeRelayed)
db = db.Where("tx_status != ?", TxStatusTypeDropped)
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = TxStatusTypeDropped
}
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
@@ -230,9 +290,9 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeDequeueTransaction:
case MessageQueueEventTypeDequeueTransaction:
continue
case btypes.MessageQueueEventTypeQueueTransaction:
case MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs (whose message hashes would not be found), sendMessages have been filtered out.
// replayMessage case:
// First SentMessage in L1: https://sepolia.etherscan.io/tx/0xbee4b631312448fcc2caac86e4dccf0a2ae0a88acd6c5fd8764d39d746e472eb
@@ -244,9 +304,9 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
// Ref: https://github.com/scroll-tech/scroll/blob/v4.3.44/contracts/src/L1/L1ScrollMessenger.sol#L187-L190
db = db.Where("message_hash = ?", l1MessageQueueEvent.MessageHash.String())
txHashUpdateFields["l1_replay_tx_hash"] = l1MessageQueueEvent.TxHash.String()
case btypes.MessageQueueEventTypeDropTransaction:
case MessageQueueEventTypeDropTransaction:
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
db = db.Where("message_type = ?", MessageTypeL1SentMessage)
txHashUpdateFields["l1_refund_tx_hash"] = l1MessageQueueEvent.TxHash.String()
}
if err := db.Updates(txHashUpdateFields).Error; err != nil {
@@ -260,12 +320,12 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
func (c *CrossMessage) UpdateBatchStatusOfL2Withdrawals(ctx context.Context, startBlockNumber, endBlockNumber, batchIndex uint64) error {
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("message_type = ?", MessageTypeL2SentMessage)
db = db.Where("l2_block_number >= ?", startBlockNumber)
db = db.Where("l2_block_number <= ?", endBlockNumber)
updateFields := make(map[string]interface{})
updateFields["batch_index"] = batchIndex
updateFields["rollup_status"] = btypes.RollupStatusTypeFinalized
updateFields["rollup_status"] = RollupStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch status of L2 sent messages, start: %v, end: %v, index: %v, error: %w", startBlockNumber, endBlockNumber, batchIndex, err)
}
@@ -402,7 +462,7 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
mergedL2RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l2RelayedMessages {
if existing, found := mergedL2RelayedMessages[message.MessageHash]; found {
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L2BlockNumber > existing.L2BlockNumber {
mergedL2RelayedMessages[message.MessageHash] = message
}
} else {
@@ -429,8 +489,8 @@ func (c *CrossMessage) InsertOrUpdateL2RelayedMessagesOfL1Deposits(ctx context.C
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
),
},
},
@@ -460,7 +520,7 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
mergedL1RelayedMessages := make(map[string]*CrossMessage)
for _, message := range l1RelayedMessages {
if existing, found := mergedL1RelayedMessages[message.MessageHash]; found {
if types.TxStatusType(message.TxStatus) == types.TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
if TxStatusType(message.TxStatus) == TxStatusTypeRelayed || message.L1BlockNumber > existing.L1BlockNumber {
mergedL1RelayedMessages[message.MessageHash] = message
}
} else {
@@ -481,8 +541,8 @@ func (c *CrossMessage) InsertOrUpdateL1RelayedMessagesOfL2Withdrawals(ctx contex
Exprs: []clause.Expression{
clause.And(
// do not over-write terminal statuses.
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: types.TxStatusTypeDropped},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeRelayed},
clause.Neq{Column: "cross_message_v2.tx_status", Value: TxStatusTypeDropped},
),
},
},


@@ -1,38 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE bridge_batch_deposit_event_v2
(
id BIGSERIAL PRIMARY KEY,
token_type SMALLINT NOT NULL,
sender VARCHAR NOT NULL,
batch_index BIGINT DEFAULT NULL,
token_amount VARCHAR NOT NULL,
fee VARCHAR NOT NULL,
l1_token_address VARCHAR DEFAULT NULL,
l2_token_address VARCHAR DEFAULT NULL,
l1_block_number BIGINT DEFAULT NULL,
l2_block_number BIGINT DEFAULT NULL,
l1_tx_hash VARCHAR DEFAULT NULL,
l1_log_index INTEGER DEFAULT NULL,
l2_tx_hash VARCHAR DEFAULT NULL,
tx_status SMALLINT NOT NULL,
block_timestamp BIGINT NOT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX idx_l1hash_l1logindex ON bridge_batch_deposit_event_v2 (l1_tx_hash, l1_log_index);
CREATE INDEX IF NOT EXISTS idx_bbde_batchidx_sender ON bridge_batch_deposit_event_v2 (batch_index, sender);
CREATE INDEX IF NOT EXISTS idx_bbde_l1_block_number ON bridge_batch_deposit_event_v2 (l1_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l2_block_number ON bridge_batch_deposit_event_v2 (l2_block_number DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l1_tx_hash ON bridge_batch_deposit_event_v2 (l1_tx_hash DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_l2_tx_hash ON bridge_batch_deposit_event_v2 (l2_tx_hash DESC);
CREATE INDEX IF NOT EXISTS idx_bbde_sender_block_timestamp ON bridge_batch_deposit_event_v2 (sender, block_timestamp DESC);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS bridge_batch_deposit_event_v2;
-- +goose StatementEnd


@@ -27,9 +27,9 @@ func Route(router *gin.Engine, conf *config.Config, reg prometheus.Registerer) {
r := router.Group("api/")
r.GET("/txs", api.TxsByAddressCtl.GetTxsByAddress)
r.GET("/l2/withdrawals", api.L2WithdrawalsByAddressCtl.GetL2WithdrawalsByAddress)
r.GET("/l2/unclaimed/withdrawals", api.L2UnclaimedWithdrawalsByAddressCtl.GetL2UnclaimedWithdrawalsByAddress)
r.GET("/txs", api.HistoryCtrler.GetTxsByAddress)
r.GET("/l2/withdrawals", api.HistoryCtrler.GetL2WithdrawalsByAddress)
r.GET("/l2/unclaimed/withdrawals", api.HistoryCtrler.GetL2UnclaimedWithdrawalsByAddress)
r.POST("/txsbyhashes", api.TxsByHashesCtl.PostQueryTxsByHashes)
r.POST("/txsbyhashes", api.HistoryCtrler.PostQueryTxsByHashes)
}


@@ -1,93 +0,0 @@
package types
// TxStatusType represents the status of a transaction.
type TxStatusType int
// Constants for TxStatusType.
const (
// TxStatusTypeSent is one of the initial statuses for cross-chain messages.
// It is used as the default value to prevent overwriting the transaction status in scenarios where the message status might change
// from a later status (e.g., relayed) back to "sent".
// Example flow (L1 -> L2 message, and L1 fetcher is slower than L2 fetcher):
// 1. The relayed message is first tracked and processed, setting tx_status to TxStatusTypeRelayed.
// 2. The sent message is later processed (same cross-chain message), the tx_status should not over-write TxStatusTypeRelayed.
TxStatusTypeSent TxStatusType = iota
TxStatusTypeSentTxReverted // Message hash is not tracked, thus it will not be processed again.
TxStatusTypeRelayed // Terminal status.
// TxStatusTypeFailedRelayed Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeFailedRelayed
// TxStatusTypeRelayTxReverted Retry: this often occurs due to an out of gas (OOG) issue if the transaction was initiated via the frontend.
TxStatusTypeRelayTxReverted
TxStatusTypeSkipped
TxStatusTypeDropped // Terminal status.
// TxStatusBridgeBatchDeposit use deposit token to bridge batch deposit contract
TxStatusBridgeBatchDeposit
// TxStatusBridgeBatchDistribute bridge batch deposit contract distribute tokens to user success
TxStatusBridgeBatchDistribute
// TxStatusBridgeBatchDistributeFailed bridge batch deposit contract distribute tokens to user failed
TxStatusBridgeBatchDistributeFailed
)
// TokenType represents the type of token.
type TokenType int
// Constants for TokenType.
const (
TokenTypeUnknown TokenType = iota
TokenTypeETH
TokenTypeERC20
TokenTypeERC721
TokenTypeERC1155
)
// MessageType represents the type of message.
type MessageType int
// Constants for MessageType.
const (
MessageTypeUnknown MessageType = iota
MessageTypeL1SentMessage
MessageTypeL2SentMessage
MessageTypeL1BatchDeposit
)
// RollupStatusType represents the status of a rollup.
type RollupStatusType int
// Constants for RollupStatusType.
const (
RollupStatusTypeUnknown RollupStatusType = iota
RollupStatusTypeFinalized // only batch finalized status is used.
)
// MessageQueueEventType represents the type of message queue event.
type MessageQueueEventType int
// Constants for MessageQueueEventType.
const (
MessageQueueEventTypeUnknown MessageQueueEventType = iota
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
)
// BatchStatusType represents the type of batch status.
type BatchStatusType int
// Constants for BatchStatusType.
const (
BatchStatusTypeUnknown BatchStatusType = iota
BatchStatusTypeCommitted
BatchStatusTypeReverted
BatchStatusTypeFinalized
)
// UpdateStatusType represents whether the batch info has been updated in the message table.
type UpdateStatusType int
// Constants for UpdateStatusType.
const (
UpdateStatusTypeUnupdated UpdateStatusType = iota
UpdateStatusTypeUpdated
)


@@ -4,6 +4,8 @@ import (
"net/http"
"github.com/gin-gonic/gin"
"scroll-tech/bridge-history-api/internal/orm"
)
const (
@@ -77,18 +79,17 @@ type TxHistoryInfo struct {
ReplayTxHash string `json:"replay_tx_hash"`
RefundTxHash string `json:"refund_tx_hash"`
MessageHash string `json:"message_hash"`
TokenType TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
TokenType orm.TokenType `json:"token_type"` // 0: unknown, 1: eth, 2: erc20, 3: erc721, 4: erc1155
TokenIDs []string `json:"token_ids"` // only for erc721 and erc1155
TokenAmounts []string `json:"token_amounts"` // for eth and erc20, the length is 1, for erc721 and erc1155, the length could be > 1
MessageType MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
MessageType orm.MessageType `json:"message_type"` // 0: unknown, 1: layer 1 message, 2: layer 2 message
L1TokenAddress string `json:"l1_token_address"`
L2TokenAddress string `json:"l2_token_address"`
BlockNumber uint64 `json:"block_number"`
TxStatus TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
TxStatus orm.TxStatusType `json:"tx_status"` // 0: sent, 1: sent failed, 2: relayed, 3: failed relayed, 4: relayed reverted, 5: skipped, 6: dropped
CounterpartChainTx *CounterpartChainTx `json:"counterpart_chain_tx"`
ClaimInfo *ClaimInfo `json:"claim_info"`
BlockTimestamp uint64 `json:"block_timestamp"`
BatchDepositFee string `json:"batch_deposit_fee"` // only for bridge batch deposit
}
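Given the integer encodings documented in the field comments, an abridged response entry would serialize roughly as follows (illustrative values, not a real API response: 1 = ETH token, 2 = L2 sent message, 0 = sent):

{
  "message_hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "token_type": 1,
  "message_type": 2,
  "tx_status": 0,
  "token_amounts": ["1000000000000000000"],
  "block_number": 1234567
}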
// RenderJSON renders response with json


@@ -51,7 +51,7 @@ func lint() {
}
cmd = exec.Command(filepath.Join(goBin(), "golangci-lint"))
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml", "--timeout", "10m")
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml")
if *v {
cmd.Args = append(cmd.Args, "-v")


@@ -13,7 +13,7 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.28.0
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
@@ -61,7 +61,7 @@ require (
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/distribution/reference v0.5.0 // indirect
@@ -182,7 +182,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/scroll-tech/zktrie v0.7.1 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect


@@ -148,8 +148,8 @@ github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoY
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -607,10 +607,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=


@@ -15,7 +15,7 @@ services:
# Creates a genesis state for the beacon chain using a YAML configuration file and
# a deterministic set of 64 validators.
create-beacon-chain-genesis:
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:HEAD-263557"
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest"
command:
- testnet
- generate-genesis


@@ -302,6 +302,8 @@ func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
}
// EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func EstimateBlockL1CommitCalldataSize(b *encoding.Block) (uint64, error) {
var size uint64
for _, txData := range b.Transactions {


@@ -338,7 +338,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash,
}
// compute blob versioned hash
c, err := kzg4844.BlobToCommitment(blob)
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
}
@@ -364,7 +364,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash,
func makeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
// blob contains 131072 bytes but we can only utilize 31/32 of these
if len(blobBytes) > 126976 {
return nil, fmt.Errorf("oversized batch payload, blob bytes length: %v, max length: %v", len(blobBytes), 126976)
return nil, fmt.Errorf("oversized batch payload")
}
// the canonical (padded) blob payload
@@ -435,12 +435,12 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
return nil, errors.New("called BlobDataProof with empty z")
}
commitment, err := kzg4844.BlobToCommitment(b.blob)
commitment, err := kzg4844.BlobToCommitment(*b.blob)
if err != nil {
return nil, fmt.Errorf("failed to create blob commitment")
}
proof, y, err := kzg4844.ComputeProof(b.blob, *b.z)
proof, y, err := kzg4844.ComputeProof(*b.blob, *b.z)
if err != nil {
log.Crit("failed to create KZG proof at point", "err", err, "z", hex.EncodeToString(b.z[:]))
}
@@ -472,7 +472,8 @@ func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
if err != nil {
return 0, err
}
return calculatePaddedBlobSize(metadataSize + chunkDataSize), nil
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
return paddedSize, nil
}
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
@@ -486,7 +487,8 @@ func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
}
batchDataSize += chunkDataSize
}
return calculatePaddedBlobSize(metadataSize + batchDataSize), nil
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
return paddedSize, nil
}
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
@@ -504,134 +506,3 @@ func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
}
return dataSize, nil
}
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates the gas cost for computing the keccak256 hash of a given size.
func GetKeccak256Gas(size uint64) uint64 {
return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
}
// GetMemoryExpansionCost calculates the cost of memory expansion for a given memoryByteSize.
func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
memorySizeWord := (memoryByteSize + 31) / 32
memoryCost := (memorySizeWord*memorySizeWord)/512 + (3 * memorySizeWord)
return memoryCost
}
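A worked example of the two helpers above (illustrative arithmetic): for size = 64 bytes, memorySizeWord = (64+31)/32 = 2, so GetMemoryExpansionCost(64) = (2*2)/512 + 3*2 = 0 + 6 = 6 under integer division, and GetKeccak256Gas(64) = 6 + 30 + 6*2 = 48 gas.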
// EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately.
func EstimateBlockL1CommitGas(b *encoding.Block) uint64 {
var total uint64
var numL1Messages uint64
for _, txData := range b.Transactions {
if txData.Type == types.L1MessageTxType {
numL1Messages++
continue
}
}
// 60 bytes BlockContext calldata
total += CalldataNonZeroByteGas * 60
// sload
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
// staticcall
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
total += GetMemoryExpansionCost(36) * numL1Messages // staticcall to proxy
total += 100 * numL1Messages // read admin in proxy
total += 100 * numL1Messages // read impl in proxy
total += 100 * numL1Messages // access impl
total += GetMemoryExpansionCost(36) * numL1Messages // delegatecall to impl
return total
}
// EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately.
func EstimateChunkL1CommitCalldataSize(c *encoding.Chunk) uint64 {
return uint64(60 * len(c.Blocks))
}
// EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately.
func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
var totalNonSkippedL1Messages uint64
var totalL1CommitGas uint64
for _, block := range c.Blocks {
totalNonSkippedL1Messages += uint64(len(block.Transactions)) - block.NumL2Transactions()
blockL1CommitGas := EstimateBlockL1CommitGas(block)
totalL1CommitGas += blockL1CommitGas
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalNonSkippedL1Messages) // chunk hash
return totalL1CommitGas
}
// EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately.
func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
var totalL1CommitGas uint64
// Add extra gas costs
totalL1CommitGas += 100000 // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 21000 // base fee for tx
totalL1CommitGas += CalldataNonZeroByteGas // version in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += GetKeccak256Gas(89 + 32) // parent batch header hash, length is estimated as 89 (constant part)+ 32 (1 skippedL1MessageBitmap)
totalL1CommitGas += CalldataNonZeroByteGas * (89 + 32) // parent batch header in calldata
// adjust batch data hash gas cost
totalL1CommitGas += GetKeccak256Gas(uint64(32 * len(b.Chunks)))
totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore
for _, chunk := range b.Chunks {
chunkL1CommitGas := EstimateChunkL1CommitGas(chunk)
totalL1CommitGas += chunkL1CommitGas
totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore)
totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk
totalL1CommitGas += CalldataNonZeroByteGas * (32 * (totalL1MessagePoppedInChunk + 255) / 256)
totalL1CommitGas += GetKeccak256Gas(89 + 32*(totalL1MessagePoppedInChunk+255)/256)
totalL1CommitCalldataSize := EstimateChunkL1CommitCalldataSize(chunk)
totalL1CommitGas += GetMemoryExpansionCost(totalL1CommitCalldataSize)
}
return totalL1CommitGas
}
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) uint64 {
var totalL1CommitCalldataSize uint64
for _, chunk := range b.Chunks {
totalL1CommitCalldataSize += EstimateChunkL1CommitCalldataSize(chunk)
}
return totalL1CommitCalldataSize
}
// calculatePaddedBlobSize calculates the required size on blob storage
// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
func calculatePaddedBlobSize(dataSize uint64) uint64 {
paddedSize := (dataSize / 31) * 32
if dataSize%31 != 0 {
paddedSize += 1 + dataSize%31 // Add 1 byte for the first empty byte plus the remainder bytes
}
return paddedSize
}
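// Worked example of the formula change above (integer division): for
// dataSize = 100, calculatePaddedBlobSize yields (100/31)*32 + 1 + 100%31 =
// 96 + 1 + 7 = 104, while the inlined ((100+30)/31)*32 = 4*32 = 128 always
// rounds up to a whole number of 32-byte words; hence the larger expected
// values in the estimation tests below (e.g. 302 -> 320).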


@@ -592,7 +592,7 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
actualZ := hex.EncodeToString(z[:])
assert.Equal(t, tc.expectedz, actualZ)
_, y, err := kzg4844.ComputeProof(b, *z)
_, y, err := kzg4844.ComputeProof(*b, *z)
assert.NoError(t, err)
actualY := hex.EncodeToString(y[:])
assert.Equal(t, tc.expectedy, actualY)
@@ -759,121 +759,49 @@ func TestCodecV1BatchSkipBitmap(t *testing.T) {
assert.Equal(t, 42, int(batch.TotalL1MessagePopped))
}
func TestCodecV1ChunkAndBatchCommitBlobSizeEstimation(t *testing.T) {
func TestCodecV1ChunkAndBatchBlobSizeEstimation(t *testing.T) {
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
chunk2BlobSize, err := EstimateChunkL1CommitBlobSize(chunk2)
assert.NoError(t, err)
assert.Equal(t, uint64(302), chunk2BlobSize)
assert.Equal(t, uint64(320), chunk2BlobSize)
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch2BlobSize, err := EstimateBatchL1CommitBlobSize(batch2)
assert.NoError(t, err)
assert.Equal(t, uint64(302), batch2BlobSize)
assert.Equal(t, uint64(320), batch2BlobSize)
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
chunk3BlobSize, err := EstimateChunkL1CommitBlobSize(chunk3)
assert.NoError(t, err)
assert.Equal(t, uint64(5929), chunk3BlobSize)
assert.Equal(t, uint64(5952), chunk3BlobSize)
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch3BlobSize, err := EstimateBatchL1CommitBlobSize(batch3)
assert.NoError(t, err)
assert.Equal(t, uint64(5929), batch3BlobSize)
assert.Equal(t, uint64(5952), batch3BlobSize)
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk4BlobSize, err := EstimateChunkL1CommitBlobSize(chunk4)
assert.NoError(t, err)
assert.Equal(t, uint64(98), chunk4BlobSize)
assert.Equal(t, uint64(128), chunk4BlobSize)
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch4BlobSize, err := EstimateBatchL1CommitBlobSize(batch4)
assert.NoError(t, err)
assert.Equal(t, uint64(98), batch4BlobSize)
assert.Equal(t, uint64(128), batch4BlobSize)
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
chunk5BlobSize, err := EstimateChunkL1CommitBlobSize(chunk5)
assert.NoError(t, err)
assert.Equal(t, uint64(6166), chunk5BlobSize)
assert.Equal(t, uint64(6176), chunk5BlobSize)
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk6BlobSize, err := EstimateChunkL1CommitBlobSize(chunk6)
assert.NoError(t, err)
assert.Equal(t, uint64(98), chunk6BlobSize)
assert.Equal(t, uint64(128), chunk6BlobSize)
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
batch5BlobSize, err := EstimateBatchL1CommitBlobSize(batch5)
assert.NoError(t, err)
assert.Equal(t, uint64(6199), batch5BlobSize)
}
func TestCodecV1ChunkAndBatchCommitCalldataSizeEstimation(t *testing.T) {
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
chunk2CalldataSize := EstimateChunkL1CommitCalldataSize(chunk2)
assert.Equal(t, uint64(60), chunk2CalldataSize)
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch2CalldataSize := EstimateBatchL1CommitCalldataSize(batch2)
assert.Equal(t, uint64(60), batch2CalldataSize)
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
chunk3CalldataSize := EstimateChunkL1CommitCalldataSize(chunk3)
assert.Equal(t, uint64(60), chunk3CalldataSize)
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch3CalldataSize := EstimateBatchL1CommitCalldataSize(batch3)
assert.Equal(t, uint64(60), batch3CalldataSize)
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk4CalldataSize := EstimateChunkL1CommitCalldataSize(chunk4)
assert.Equal(t, uint64(60), chunk4CalldataSize)
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch4BlobSize := EstimateBatchL1CommitCalldataSize(batch4)
assert.Equal(t, uint64(60), batch4BlobSize)
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
chunk5CalldataSize := EstimateChunkL1CommitCalldataSize(chunk5)
assert.Equal(t, uint64(120), chunk5CalldataSize)
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk6BlobSize := EstimateChunkL1CommitCalldataSize(chunk6)
assert.Equal(t, uint64(60), chunk6BlobSize)
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
batch5CalldataSize := EstimateBatchL1CommitCalldataSize(batch5)
assert.Equal(t, uint64(180), batch5CalldataSize)
}
func TestCodecV1ChunkAndBatchCommitGasEstimation(t *testing.T) {
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
chunk2Gas := EstimateChunkL1CommitGas(chunk2)
assert.Equal(t, uint64(2084), chunk2Gas)
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch2Gas := EstimateBatchL1CommitGas(batch2)
assert.Equal(t, uint64(158609), batch2Gas)
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
chunk3Gas := EstimateChunkL1CommitGas(chunk3)
assert.Equal(t, uint64(2084), chunk3Gas)
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch3Gas := EstimateBatchL1CommitGas(batch3)
assert.Equal(t, uint64(158609), batch3Gas)
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk4Gas := EstimateChunkL1CommitGas(chunk4)
assert.Equal(t, uint64(4705), chunk4Gas)
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch4Gas := EstimateBatchL1CommitGas(batch4)
assert.Equal(t, uint64(161262), batch4Gas)
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
chunk5Gas := EstimateChunkL1CommitGas(chunk5)
assert.Equal(t, uint64(4122), chunk5Gas)
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk6Gas := EstimateChunkL1CommitGas(chunk6)
assert.Equal(t, uint64(4705), chunk6Gas)
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
batch5Gas := EstimateBatchL1CommitGas(batch5)
assert.Equal(t, uint64(165967), batch5Gas)
assert.Equal(t, uint64(6208), batch5BlobSize)
}
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.6"
var tag = "v4.4.3"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {


@@ -6,6 +6,7 @@ import {AccessControlEnumerableUpgradeable} from "@openzeppelin/contracts-upgrad
import {ReentrancyGuardUpgradeable} from "@openzeppelin/contracts-upgradeable/security/ReentrancyGuardUpgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {AddressUpgradeable} from "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol";
import {IL1ERC20Gateway} from "../L1/gateways/IL1ERC20Gateway.sol";
import {IL1GatewayRouter} from "../L1/gateways/IL1GatewayRouter.sol";
@@ -82,7 +83,7 @@ contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyG
/// @notice The safe gas limit for batch bridge.
uint256 private constant SAFE_BATCH_BRIDGE_GAS_LIMIT = 200000;
/// @notice The address of corresponding `L2BatchBridgeGateway` contract.
/// @notice The address of corresponding `L2BatchDepositGateway` contract.
address public immutable counterpart;
/// @notice The address of `L1GatewayRouter` contract.
@@ -165,7 +166,7 @@ contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyG
* Constructor *
***************/
/// @param _counterpart The address of `L2BatchBridgeGateway` contract in L2.
/// @param _counterpart The address of `L2BatchDepositGateway` contract in L2.
/// @param _router The address of `L1GatewayRouter` contract in L1.
/// @param _messenger The address of `L1ScrollMessenger` contract in L1.
/// @param _queue The address of `L1MessageQueue` contract in L1.
@@ -183,7 +184,7 @@ contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyG
queue = _queue;
}
/// @notice Initialize the storage of `L1BatchBridgeGateway`.
/// @notice Initialize the storage of `L1BatchDepositGateway`.
/// @param _feeVault The address of fee vault contract.
function initialize(address _feeVault) external initializer {
__Context_init(); // from ContextUpgradeable
@@ -208,7 +209,7 @@ contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyG
}
/// @notice Deposit ETH.
function depositETH() external payable nonReentrant {
function depositETH() external payable {
// no safe cast check here, since no one has so much ETH yet.
_deposit(address(0), _msgSender(), uint96(msg.value));
}
@@ -217,7 +218,7 @@ contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyG
///
/// @param token The address of token.
/// @param amount The amount of token to deposit. We use type `uint96`, since it is enough for most of the major tokens.
function depositERC20(address token, uint96 amount) external nonReentrant {
function depositERC20(address token, uint96 amount) external {
if (token == address(0)) revert ErrorIncorrectMethodForETHDeposit();
// common practice to handle fee on transfer token.
@@ -238,7 +239,7 @@ contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyG
///
/// @param token The address of token to update.
/// @param newConfig The new config.
function setBatchConfig(address token, BatchConfig calldata newConfig) external onlyRole(DEFAULT_ADMIN_ROLE) {
function setBatchConfig(address token, BatchConfig memory newConfig) external onlyRole(DEFAULT_ADMIN_ROLE) {
if (
newConfig.maxTxsPerBatch == 0 ||
newConfig.maxDelayPerBatch == 0 ||
@@ -344,15 +345,15 @@ contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyG
address token,
address sender,
uint96 amount
) internal {
) internal nonReentrant {
BatchConfig memory cachedBatchConfig = configs[token];
TokenState memory cachedTokenState = tokens[token];
_tryFinalizeCurrentBatch(token, cachedBatchConfig, cachedTokenState);
BatchState memory cachedBatchState = batches[token][cachedTokenState.currentBatchIndex];
if (amount < cachedBatchConfig.minAmountPerTx) {
revert ErrorDepositAmountTooSmall();
}
BatchState memory cachedBatchState = batches[token][cachedTokenState.currentBatchIndex];
emit Deposit(sender, token, cachedTokenState.currentBatchIndex, amount, cachedBatchConfig.feeAmountPerTx);


@@ -667,8 +667,6 @@ github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMP
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ=
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -1492,11 +1490,7 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20230306131930-03b4de32b78b/go.mod
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230802095950-4b2bbf6225e7/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@@ -1565,6 +1559,9 @@ github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZy
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=

prover_rust/Cargo.lock generated Normal file

File diff suppressed because it is too large

prover_rust/Cargo.toml Normal file

@@ -0,0 +1,46 @@
[package]
name = "prover_rust"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
[dependencies]
anyhow = "1.0"
log = "0.4"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.0rc4", default-features = false, features = ["parallel_syn", "scroll"] }
eth-types = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3" }
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
env_logger = "0.11.3"
sled = "0.34.7"
http = "1.1.0"

prover_rust/Makefile Normal file

@@ -0,0 +1,27 @@
.PHONY: prover
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
GIT_REV=$(shell git rev-parse --short HEAD)
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
endif
prover:
GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
rm -rf ./lib && mkdir ./lib
find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib
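# Note: ZK_VERSION concatenates the short commit hashes of the pinned
# zkevm-circuits and halo2 (or halo2-gpu) checkouts, e.g. "abc1234-def5678"
# (hashes illustrative); together with GIT_REV it is baked into the binary
# version string by src/version.rs at build time.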

prover_rust/config.json Normal file

@@ -0,0 +1,22 @@
{
"prover_name": "prover-1",
"hard_fork_name": "homestead",
"keystore_path": "keystore.json",
"keystore_password": "prover-pwd",
"db_path": "unique-db-path-for-prover-1",
"core": {
"params_path": "params",
"assets_path": "assets",
"proof_type": 2
},
"coordinator": {
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "http://localhost:9999",
"confirmations": "0x1"
}
}


@@ -0,0 +1,21 @@
#!/bin/bash
config_file="$HOME/.cargo/config"
if [ ! -e "$config_file" ]; then
exit 0
fi
if [[ $(head -n 1 "$config_file") == "#"* ]]; then
exit 0
fi
halo2gpu_path=$(grep -Po '(?<=paths = \[")([^"]*)' $config_file)
pushd $halo2gpu_path
commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"
popd


@@ -0,0 +1 @@
nightly-2023-12-03

prover_rust/rustfmt.toml Normal file

@@ -0,0 +1,9 @@
edition = "2021"
comment_width = 100
imports_granularity = "Crate"
max_width = 100
newline_style = "Unix"
# normalize_comments = true
reorder_imports = true
wrap_comments = true

prover_rust/src/config.rs Normal file

@@ -0,0 +1,57 @@
use ethers_core::types::BlockNumber;
use serde::{Deserialize, Serialize};
// use serde_json::Error;
use std::{error::Error, fs::File};
use crate::types::ProofType;
#[derive(Debug, Serialize, Deserialize)]
pub struct ProverCoreConfig {
pub params_path: String,
pub assets_path: String,
#[serde(default)]
pub proof_type: ProofType,
#[serde(default)]
pub dump_dir: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CoordinatorConfig {
pub base_url: String,
pub retry_count: u16,
pub retry_wait_time_sec: u32,
pub connection_timeout_sec: u32,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct L2GethConfig {
pub endpoint: String,
pub confirmations: BlockNumber,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
pub prover_name: String,
pub hard_fork_name: String,
pub keystore_path: String,
pub keystore_password: String,
pub db_path: String,
pub core: ProverCoreConfig,
pub coordinator: CoordinatorConfig,
pub l2geth: Option<L2GethConfig>,
}
impl Config {
pub fn from_reader<R>(reader: R) -> Result<Self, Box<dyn Error>>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| Box::new(e) as Box<dyn Error>)
}
pub fn from_file(file_name: String) -> Result<Self, Box<dyn Error>> {
let file = File::open(file_name)?;
Config::from_reader(&file)
}
}
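#[cfg(test)]
mod tests {
    use super::*;

    // A minimal parsing sketch: the JSON mirrors prover_rust/config.json
    // (values illustrative); `l2geth` is omitted to show it is optional.
    #[test]
    fn parse_minimal_config() {
        let raw = r#"{
            "prover_name": "prover-1",
            "hard_fork_name": "homestead",
            "keystore_path": "keystore.json",
            "keystore_password": "prover-pwd",
            "db_path": "db",
            "core": {"params_path": "params", "assets_path": "assets", "proof_type": 2},
            "coordinator": {"base_url": "http://localhost:8555", "retry_count": 10, "retry_wait_time_sec": 10, "connection_timeout_sec": 30}
        }"#;
        let config = Config::from_reader(raw.as_bytes()).expect("config should parse");
        assert_eq!(config.prover_name, "prover-1");
        assert!(config.l2geth.is_none());
    }
}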


@@ -0,0 +1,137 @@
mod api;
mod errors;
pub mod listener;
pub mod types;
use anyhow::{bail, Context, Ok, Result};
use std::rc::Rc;
use api::API;
use errors::*;
use listener::Listener;
use log;
use tokio::runtime::Runtime;
use types::*;
use crate::key_signer::KeySigner;
pub struct Config {
pub endpoint: String,
pub prover_name: String,
pub prover_version: String,
pub hard_fork_name: String,
}
pub struct CoordinatorClient {
api: API,
token: Option<String>,
config: Config,
key_signer: Rc<KeySigner>,
rt: Runtime,
listener: Box<dyn Listener>,
}
impl CoordinatorClient {
pub fn new(
config: Config,
key_signer: Rc<KeySigner>,
listener: Box<dyn Listener>,
) -> Result<Self> {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
let mut client = Self {
api: API::new(&config.endpoint)?,
token: None,
config,
key_signer,
rt,
listener,
};
client.login()?;
Ok(client)
}
fn login(&mut self) -> Result<()> {
let api = &self.api;
let challenge_response = self.rt.block_on(api.challenge())?;
if challenge_response.errcode != Success {
bail!("challenge failed: {}", challenge_response.errmsg)
}
let mut token: String;
if let Some(r) = challenge_response.data {
token = r.token;
} else {
bail!("challenge failed: got empty token")
}
let login_message = LoginMessage {
challenge: token.clone(),
prover_name: self.config.prover_name.clone(),
prover_version: self.config.prover_version.clone(),
hard_fork_name: self.config.hard_fork_name.clone(),
};
let buffer = login_message.rlp();
let signature = self.key_signer.sign_buffer(&buffer)?;
let login_request = LoginRequest {
message: login_message,
signature,
};
let login_response = self.rt.block_on(api.login(&login_request, &token))?;
if login_response.errcode != Success {
bail!("login failed: {}", login_response.errmsg)
}
if let Some(r) = login_response.data {
token = r.token;
} else {
bail!("login failed: got empty token")
}
self.token = Some(token);
Ok(())
}
fn action_with_re_login<T, F, R>(&mut self, req: &R, mut f: F) -> Result<Response<T>>
where
F: FnMut(&mut Self, &R) -> Result<Response<T>>,
{
let response = f(self, req)?;
if response.errcode == ErrJWTTokenExpired {
log::info!("JWT expired, attempting to re-login");
self.login().context("JWT expired, re-login failed")?;
log::info!("re-login success");
return f(self, req);
} else if response.errcode != Success {
bail!("action failed: {}", response.errmsg)
}
Ok(response)
}
fn do_get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
self.rt
.block_on(self.api.get_task(req, self.token.as_ref().unwrap()))
}
pub fn get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
self.action_with_re_login(req, |s, req| s.do_get_task(req))
}
fn do_submit_proof(
&mut self,
req: &SubmitProofRequest,
) -> Result<Response<SubmitProofResponseData>> {
let response = self
.rt
.block_on(self.api.submit_proof(req, self.token.as_ref().unwrap()))?;
self.listener.on_proof_submitted(req);
Ok(response)
}
pub fn submit_proof(
&mut self,
req: &SubmitProofRequest,
) -> Result<Response<SubmitProofResponseData>> {
self.action_with_re_login(req, |s, req| s.do_submit_proof(req))
}
}


@@ -0,0 +1,105 @@
use super::types::*;
use anyhow::{bail, Result};
use reqwest::{header::CONTENT_TYPE, Url};
use serde::Serialize;
pub struct API {
url_base: Url,
pub client: reqwest::Client,
}
impl API {
pub fn new(url_base: &str) -> Result<Self> {
Ok(Self {
url_base: Url::parse(url_base)?,
client: reqwest::Client::new(),
})
}
pub async fn challenge(&self) -> Result<Response<ChallengeResponseData>> {
let method = "/coordinator/v1/challenge";
let url = self.build_url(method)?;
let response = self
.client
.get(url)
.header(CONTENT_TYPE, "application/json")
.send()
.await?;
let response_body = response.text().await?;
serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
}
pub async fn login(
&self,
req: &LoginRequest,
token: &String,
) -> Result<Response<LoginResponseData>> {
let method = "/coordinator/v1/login";
self.post_with_token(&method, req, token).await
}
pub async fn get_task(
&self,
req: &GetTaskRequest,
token: &String,
) -> Result<Response<GetTaskResponseData>> {
let method = "/coordinator/v1/get_task";
self.post_with_token(&method, req, token).await
}
pub async fn submit_proof(
&self,
req: &SubmitProofRequest,
token: &String,
) -> Result<Response<SubmitProofResponseData>> {
let method = "/coordinator/v1/submit_proof";
self.post_with_token(&method, req, token).await
}
async fn post_with_token<Req, Resp>(
&self,
method: &str,
req: &Req,
token: &String,
) -> Result<Resp>
where
Req: ?Sized + Serialize,
Resp: serde::de::DeserializeOwned,
{
let url = self.build_url(method)?;
let request_body = serde_json::to_string(req)?;
log::info!("[coordinator client], {method}, request: {request_body}");
let response = self
.client
.post(url)
.header(CONTENT_TYPE, "application/json")
.bearer_auth(token)
.body(request_body)
.send()
.await?;
if response.status() != http::status::StatusCode::OK {
log::error!(
"[coordinator client], {method}, status not ok: {}",
response.status()
);
bail!(
"[coordinator client], {method}, status not ok: {}",
response.status()
)
}
let response_body = response.text().await?;
log::info!("[coordinator client], {method}, response: {response_body}");
serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
}
fn build_url(&self, method: &str) -> Result<Url> {
self.url_base.join(method).map_err(|e| anyhow::anyhow!(e))
}
}


@@ -0,0 +1,17 @@
// TODO: refactor using enum
pub type ErrorCode = i32;
pub const Success: ErrorCode = 0;
pub const InternalServerError: ErrorCode = 500;
pub const ErrJWTCommonErr: ErrorCode = 50000;
pub const ErrJWTTokenExpired: ErrorCode = 50001;
pub const ErrProverStatsAPIParameterInvalidNo: ErrorCode = 10001;
pub const ErrProverStatsAPIProverTaskFailure: ErrorCode = 10002;
pub const ErrProverStatsAPIProverTotalRewardFailure: ErrorCode = 10003;
pub const ErrCoordinatorParameterInvalidNo: ErrorCode = 20001;
pub const ErrCoordinatorGetTaskFailure: ErrorCode = 20002;
pub const ErrCoordinatorHandleZkProofFailure: ErrorCode = 20003;
pub const ErrCoordinatorEmptyProofData: ErrorCode = 20004;


@@ -0,0 +1,5 @@
use super::SubmitProofRequest;
pub trait Listener {
fn on_proof_submitted(&self, req: &SubmitProofRequest);
}


@@ -0,0 +1,76 @@
use crate::types::{ProofFailureType, ProofStatus};
use rlp::RlpStream;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize)]
pub struct Response<T> {
pub errcode: i32,
pub errmsg: String,
pub data: Option<T>,
}
#[derive(Serialize, Deserialize)]
pub struct LoginMessage {
pub challenge: String,
pub prover_name: String,
pub prover_version: String,
pub hard_fork_name: String,
}
impl LoginMessage {
pub fn rlp(&self) -> Vec<u8> {
let mut rlp = RlpStream::new();
let num_fields = 4;
rlp.begin_list(num_fields);
rlp.append(&self.prover_name);
rlp.append(&self.prover_version);
rlp.append(&self.challenge);
rlp.append(&self.hard_fork_name);
rlp.out().freeze().into()
}
}
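// NOTE: the RLP field order above (prover_name, prover_version, challenge,
// hard_fork_name) is what the login signature covers; presumably the
// coordinator re-encodes the same fields in the same order when verifying,
// so the order must not change unilaterally.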
#[derive(Serialize, Deserialize)]
pub struct LoginRequest {
pub message: LoginMessage,
pub signature: String,
}
#[derive(Serialize, Deserialize)]
pub struct LoginResponseData {
pub time: String,
pub token: String,
}
pub type ChallengeResponseData = LoginResponseData;
#[derive(Default, Serialize, Deserialize)]
pub struct GetTaskRequest {
pub task_type: crate::types::ProofType,
pub prover_height: Option<u64>,
pub vks: Vec<String>,
pub vk: String,
}
#[derive(Serialize, Deserialize)]
pub struct GetTaskResponseData {
pub uuid: String,
pub task_id: String,
pub task_type: crate::types::ProofType,
pub task_data: String,
pub hard_fork_name: Option<String>,
}
#[derive(Serialize, Deserialize, Default)]
pub struct SubmitProofRequest {
pub uuid: String,
pub task_id: String,
pub task_type: crate::types::ProofType,
pub status: ProofStatus,
pub proof: String,
pub failure_type: Option<ProofFailureType>,
pub failure_msg: Option<String>,
}
#[derive(Serialize, Deserialize)]
pub struct SubmitProofResponseData {}


@@ -0,0 +1,81 @@
pub mod types;
use crate::types::CommonHash;
use anyhow::Result;
use ethers_core::types::BlockNumber;
use tokio::runtime::Runtime;
use types::{BlockTrace, Header};
use ethers_providers::{Http, Provider};
/// Serialize a type.
///
/// # Panics
///
/// If the type returns an error during serialization.
pub fn serialize<T: serde::Serialize>(t: &T) -> serde_json::Value {
serde_json::to_value(t).expect("Types never fail to serialize.")
}
pub struct GethClient {
id: String,
provider: Provider<Http>,
rt: Runtime,
}
impl GethClient {
pub fn new(id: &str, api_url: &str) -> Result<Self> {
let provider = Provider::<Http>::try_from(api_url)?;
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
Ok(Self {
id: id.to_string(),
provider,
rt,
})
}
pub fn get_block_trace_by_hash(&mut self, hash: &CommonHash) -> Result<BlockTrace> {
log::info!(
"{}: calling get_block_trace_by_hash, hash: {}",
self.id,
hash
);
let trace_future = self
.provider
.request("scroll_getBlockTraceByNumberOrHash", [format!("{hash:#x}")]);
let trace = self.rt.block_on(trace_future)?;
Ok(trace)
}
pub fn header_by_number(&mut self, block_number: &BlockNumber) -> Result<Header> {
log::info!(
"{}: calling header_by_number, hash: {}",
self.id,
block_number
);
let hash = serialize(block_number);
let include_txs = serialize(&false);
let trace_future = self
.provider
.request("eth_getBlockByNumber", [hash, include_txs]);
let trace = self.rt.block_on(trace_future)?;
Ok(trace)
}
pub fn block_number(&mut self) -> Result<BlockNumber> {
log::info!("{}: calling block_number", self.id);
let trace_future = self.provider.request("eth_blockNumber", ());
let trace = self.rt.block_on(trace_future)?;
Ok(trace)
}
}
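// A minimal usage sketch (endpoint illustrative):
//
//     let mut client = GethClient::new("prover-1", "http://localhost:9999")?;
//     let head = client.block_number()?; // issues eth_blockNumber over HTTP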


@@ -0,0 +1,40 @@
use eth_types::{H256, U64};
use serde::{Deserialize, Serialize};
use crate::types::CommonHash;
use prover::BlockTrace as ProverBlockTrace;
/// Full trace of an L2 block.
#[derive(Deserialize, Serialize, Default, Debug, Clone)]
pub struct BlockTrace {
#[serde(flatten)]
pub block_trace: ProverBlockTrace,
pub version: String,
pub withdraw_trie_root: Option<CommonHash>,
#[serde(rename = "mptwitness", default)]
pub mpt_witness: Vec<u8>,
}
pub fn get_block_number(block_trace: &ProverBlockTrace) -> Option<u64> {
block_trace.header.number.map(|n| n.as_u64())
}
pub type TxHash = H256;
/// This struct tracks https://github.com/scroll-tech/go-ethereum/blob/0f0cd99f7a2e/core/types/block.go#Header.
/// Its fields are not 100% identical to eth_types::Block, so it may need to
/// change over time; currently only the `number` field is required.
#[derive(Debug, Deserialize, Serialize, Default)]
pub struct Header {
#[serde(flatten)]
block: eth_types::Block<TxHash>,
}
impl Header {
pub fn get_number(&self) -> Option<U64> {
self.block.number
}
}


@@ -0,0 +1,105 @@
use std::path::Path;
use anyhow::Result;
use ethers_core::{
k256::{
ecdsa::{signature::hazmat::PrehashSigner, RecoveryId, Signature, SigningKey},
elliptic_curve::{sec1::ToEncodedPoint, FieldBytes},
PublicKey, Secp256k1, SecretKey,
},
types::Signature as EthSignature,
};
use eth_types::{H256, U256};
use hex::ToHex;
use tiny_keccak::{Hasher, Keccak};
pub struct KeySigner {
public_key: PublicKey,
signer: SigningKey,
}
impl KeySigner {
pub fn new(key_path: &str, passwd: &str) -> Result<Self> {
let p = Path::new(key_path);
let secret = if !p.exists() {
let dir = p.parent().unwrap();
let name = p.file_name().and_then(|s| s.to_str());
let mut rng = rand::thread_rng();
let (secret, _) = eth_keystore::new(dir, &mut rng, passwd, name)?;
secret
} else {
eth_keystore::decrypt_key(key_path, passwd).map_err(|e| anyhow::anyhow!(e))?
};
let secret_key = SecretKey::from_bytes(secret.as_slice().into())?;
let signer = SigningKey::from(secret_key.clone());
Ok(Self {
public_key: secret_key.public_key(),
signer,
})
}
pub fn get_public_key(&self) -> String {
let v: Vec<u8> = Vec::from(self.public_key.to_encoded_point(true).as_bytes());
buffer_to_hex(&v, false)
}
/// Signs the provided hash.
pub fn sign_hash(&self, hash: H256) -> Result<EthSignature> {
let signer = &self.signer as &dyn PrehashSigner<(Signature, RecoveryId)>;
let (recoverable_sig, recovery_id) = signer.sign_prehash(hash.as_ref())?;
let v = u8::from(recovery_id) as u64;
let r_bytes: FieldBytes<Secp256k1> = recoverable_sig.r().into();
let s_bytes: FieldBytes<Secp256k1> = recoverable_sig.s().into();
let r = U256::from_big_endian(r_bytes.as_slice());
let s = U256::from_big_endian(s_bytes.as_slice());
Ok(EthSignature { r, s, v })
}
pub fn sign_buffer<T>(&self, buffer: &T) -> Result<String>
where
T: AsRef<[u8]>,
{
let pre_hash = keccak256(buffer);
let hash_str = buffer_to_hex(&pre_hash, true);
log::debug!("sign_buffer hash: {hash_str}");
let hash = H256::from(pre_hash);
let sig = self.sign_hash(hash)?;
Ok(buffer_to_hex(&sig.to_vec(), true))
}
}
fn buffer_to_hex<T>(buffer: &T, has_prefix: bool) -> String
where
T: AsRef<[u8]>,
{
if has_prefix {
format!("0x{}", buffer.encode_hex::<String>())
} else {
buffer.encode_hex::<String>()
}
}
/// Compute the Keccak-256 hash of input bytes.
///
/// Note that strings are interpreted as UTF-8 bytes,
// TODO: Add Solidity Keccak256 packing support
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> [u8; 32] {
let mut output = [0u8; 32];
let mut hasher = Keccak::v256();
hasher.update(bytes.as_ref());
hasher.finalize(&mut output);
output
}
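// A minimal signing sketch (path and password illustrative):
//
//     let signer = KeySigner::new("keystore.json", "prover-pwd")?;
//     let sig_hex = signer.sign_buffer(&b"challenge".to_vec())?; // 0x-prefixed r||s||v hex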

prover_rust/src/main.rs Normal file

@@ -0,0 +1,142 @@
mod config;
mod coordinator_client;
mod geth_client;
mod key_signer;
mod prover;
mod task_cache;
mod types;
mod utils_log;
mod version;
mod zk_circuits_handler;
use anyhow::{Context, Result};
use config::Config;
use coordinator_client::listener::Listener;
use log;
use prover::Prover;
use core::time;
use std::rc::Rc;
use task_cache::TaskCache;
use types::TaskWrapper;
struct ClearCacheCoordinatorListener {
pub task_cache: Rc<TaskCache>,
}
impl Listener for ClearCacheCoordinatorListener {
fn on_proof_submitted(&self, req: &coordinator_client::types::SubmitProofRequest) {
let result = self.task_cache.delete_task(req.task_id.clone());
if let Err(e) = result {
log::error!("delete task from embed db failed, {}", e.to_string());
} else {
log::info!(
"delete task from embed db successfully, task_id: {}",
&req.task_id
);
}
}
}
struct TaskProcessor<'a> {
prover: &'a Prover<'a>,
task_cache: Rc<TaskCache>,
}
impl<'a> TaskProcessor<'a> {
pub fn new(prover: &'a Prover, task_cache: Rc<TaskCache>) -> Self {
TaskProcessor { prover, task_cache }
}
pub fn start(&self) {
loop {
log::info!("start a new round.");
if let Err(err) = self.prove_and_submit() {
log::error!("encounter error: {err}");
} else {
log::info!("prove & submit succeed.");
}
}
}
fn prove_and_submit(&self) -> Result<()> {
let task_from_cache = self
.task_cache
.get_last_task()
.context("failed to peek from stack")?;
let mut task_wrapper = match task_from_cache {
Some(t) => t,
None => {
let fetch_result = self.prover.fetch_task();
if let Err(err) = fetch_result {
std::thread::sleep(time::Duration::from_secs(10));
return Err(err).context("failed to fetch task from coordinator");
}
let task_wrapper: TaskWrapper = fetch_result.unwrap().into();
self.task_cache
.put_task(&task_wrapper)
.context("failed to push task into stack")?;
task_wrapper
}
};
if task_wrapper.get_count() <= 2 {
task_wrapper.increment_count();
self.task_cache
.put_task(&task_wrapper)
.context("failed to push task into stack, updating count")?;
log::info!(
"start to prove task, task_type: {:?}, task_id: {}",
task_wrapper.task.task_type,
task_wrapper.task.id
);
let result = match self.prover.prove_task(&task_wrapper.task) {
Ok(proof_detail) => self
.prover
.submit_proof(&proof_detail, task_wrapper.task.uuid.clone()),
Err(error) => self.prover.submit_error(
&task_wrapper.task,
types::ProofFailureType::NoPanic,
error,
),
};
return result;
}
// if the task has already been attempted 3 times, the failures were most likely caused by a panic during circuit proving
log::error!(
"zk proving panic for task, task_type: {:?}, task_id: {}",
task_wrapper.task.task_type,
task_wrapper.task.id
);
self.prover.submit_error(
&task_wrapper.task,
types::ProofFailureType::Panic,
anyhow::anyhow!("zk proving panic for task"),
)
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
utils_log::log_init();
let file_name = "config.json";
let config: Config = Config::from_file(file_name.to_string())?;
println!("{:?}", config);
let task_cache = Rc::new(TaskCache::new(&config.db_path)?);
let coordinator_listener = Box::new(ClearCacheCoordinatorListener {
task_cache: task_cache.clone(),
});
let prover = Prover::new(&config, coordinator_listener)?;
let task_processer = TaskProcessor::new(&prover, task_cache);
task_processer.start();
Ok(())
}

prover_rust/src/prover.rs Normal file

@@ -0,0 +1,324 @@
use anyhow::{bail, Error, Ok, Result};
use eth_types::U64;
use once_cell::sync::Lazy;
use std::{cell::RefCell, cmp::Ordering, env, rc::Rc};
use crate::{
config::Config,
coordinator_client::{
listener::Listener, types::*, Config as CoordinatorConfig, CoordinatorClient,
},
geth_client::{types::get_block_number, GethClient},
key_signer::KeySigner,
types::{CommonHash, ProofFailureType, ProofStatus, ProofType},
zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};
use super::types::{ProofDetail, Task};
use prover::{BlockTrace, ChunkHash, ChunkProof};
// Only used for debugging.
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
pub struct Prover<'a> {
config: &'a Config,
key_signer: Rc<KeySigner>,
circuits_handler_provider: CircuitsHandlerProvider,
coordinator_client: RefCell<CoordinatorClient>,
geth_client: Option<RefCell<GethClient>>,
}
// a U64 is positive when its 63rd (sign) bit is not set
#[allow(dead_code)] // only referenced by the commented-out confirmation logic below
fn is_positive(n: &U64) -> bool {
!n.bit(63)
}
impl<'a> Prover<'a> {
pub fn new(config: &'a Config, coordinator_listener: Box<dyn Listener>) -> Result<Self> {
let proof_type = config.core.proof_type;
let params_path = &config.core.params_path;
let assets_path = &config.core.assets_path;
let keystore_path = &config.keystore_path;
let keystore_password = &config.keystore_password;
let coordinator_config = CoordinatorConfig {
endpoint: config.coordinator.base_url.clone(),
prover_name: config.prover_name.clone(),
prover_version: crate::version::get_version(),
hard_fork_name: config.hard_fork_name.clone(),
};
let key_signer = Rc::new(KeySigner::new(&keystore_path, &keystore_password)?);
let coordinator_client = CoordinatorClient::new(
coordinator_config,
Rc::clone(&key_signer),
coordinator_listener,
)?;
let mut prover = Prover {
config,
key_signer: Rc::clone(&key_signer),
circuits_handler_provider: CircuitsHandlerProvider::new(
proof_type,
params_path,
assets_path,
)?,
coordinator_client: RefCell::new(coordinator_client),
geth_client: None,
};
if config.core.proof_type == ProofType::ProofTypeChunk {
prover.geth_client = Some(RefCell::new(GethClient::new(
"test",
&config.l2geth.as_ref().unwrap().endpoint,
)?));
}
Ok(prover)
}
pub fn get_proof_type(&self) -> ProofType {
self.config.core.proof_type
}
pub fn get_public_key(&self) -> String {
self.key_signer.get_public_key()
}
pub fn fetch_task(&self) -> Result<Task> {
let vks = self.circuits_handler_provider.get_vks();
let vk = vks[0].clone();
let mut req = GetTaskRequest {
task_type: self.get_proof_type(),
prover_height: None,
vks,
vk,
};
if self.get_proof_type() == ProofType::ProofTypeChunk {
let latest_block_number = self.get_latest_block_number_value()?;
if let Some(v) = latest_block_number {
if v.as_u64() == 0 {
bail!("omit to prove task of the genesis block")
}
req.prover_height = Some(v.as_u64());
} else {
bail!("failed to fetch latest confirmed block number, got None")
}
}
let resp = self.coordinator_client.borrow_mut().get_task(&req)?;
Task::try_from(&resp.data.unwrap()).map_err(|e| anyhow::anyhow!(e))
}
pub fn prove_task(&self, task: &Task) -> Result<ProofDetail> {
let version = task.get_version();
if let Some(handler) = self.circuits_handler_provider.get_circuits_client(version) {
self.do_prove(task, handler)
} else {
bail!("failed to get a circuit handler")
}
}
fn do_prove(&self, task: &Task, handler: &Box<dyn CircuitsHandler>) -> Result<ProofDetail> {
let mut proof_detail = ProofDetail {
id: task.id.clone(),
proof_type: task.task_type,
..Default::default()
};
match task.task_type {
ProofType::ProofTypeBatch => {
let chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)> =
self.gen_chunk_hashes_proofs(task)?;
let chunk_proofs: Vec<ChunkProof> =
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();
let is_valid = handler.aggregator_check_chunk_proofs(&chunk_proofs)?;
if !is_valid {
bail!("non-match chunk protocol, task-id: {}", &task.id)
}
let batch_proof = handler.aggregator_gen_agg_evm_proof(
chunk_hashes_proofs,
None,
self.get_output_dir(),
)?;
proof_detail.batch_proof = Some(batch_proof);
Ok(proof_detail)
}
ProofType::ProofTypeChunk => {
let chunk_trace = self.gen_chunk_traces(task)?;
let chunk_proof = handler.prover_gen_chunk_proof(
chunk_trace,
None,
None,
self.get_output_dir(),
)?;
proof_detail.chunk_proof = Some(chunk_proof);
Ok(proof_detail)
}
_ => bail!("task type invalid"),
}
}
pub fn submit_proof(&self, proof_detail: &ProofDetail, uuid: String) -> Result<()> {
let proof_data = match proof_detail.proof_type {
ProofType::ProofTypeBatch => {
serde_json::to_string(proof_detail.batch_proof.as_ref().unwrap())?
}
ProofType::ProofTypeChunk => {
serde_json::to_string(proof_detail.chunk_proof.as_ref().unwrap())?
}
_ => unreachable!(),
};
let request = SubmitProofRequest {
uuid,
task_id: proof_detail.id.clone(),
task_type: proof_detail.proof_type,
status: ProofStatus::Ok,
proof: proof_data,
..Default::default()
};
self.do_submit(&request)
}
pub fn submit_error(
&self,
task: &Task,
failure_type: ProofFailureType,
error: Error,
) -> Result<()> {
let request = SubmitProofRequest {
uuid: task.uuid.clone(),
task_id: task.id.clone(),
task_type: task.task_type,
status: ProofStatus::Error,
failure_type: Some(failure_type),
failure_msg: Some(error.to_string()),
..Default::default()
};
self.do_submit(&request)
}
fn do_submit(&self, request: &SubmitProofRequest) -> Result<()> {
self.coordinator_client.borrow_mut().submit_proof(request)?;
Ok(())
}
fn get_latest_block_number_value(&self) -> Result<Option<U64>> {
let number = self
.geth_client
.as_ref()
.unwrap()
.borrow_mut()
.block_number()?;
Ok(number.as_number())
}
// fn get_configured_block_number_value(&self) -> Result<Option<U64>> {
// self.get_block_number_value(&self.config.l2geth.as_ref().unwrap().confirmations)
// }
// fn get_block_number_value(&self, block_number: &BlockNumber) -> Result<Option<U64>> {
// match block_number {
// BlockNumber::Safe | BlockNumber::Finalized => {
// let header =
// self.geth_client.as_ref().unwrap().borrow_mut().header_by_number(block_number)?;
// Ok(header.get_number())
// },
// BlockNumber::Latest => {
// let number = self.geth_client.as_ref().unwrap().borrow_mut().block_number()?;
// Ok(number.as_number())
// },
// BlockNumber::Number(n) if is_positive(n) => {
// let number = self.geth_client.as_ref().unwrap().borrow_mut().block_number()?;
// let diff = number.as_number()
// .filter(|m| m.as_u64() >= n.as_u64())
// .map(|m| U64::from(m.as_u64() - n.as_u64()));
// Ok(diff)
// },
// _ => bail!("unknown confirmation type"),
// }
// }
fn get_output_dir(&self) -> Option<&str> {
OUTPUT_DIR.as_deref()
}
fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
if let Some(chunk_detail) = task.chunk_task_detail.as_ref() {
self.get_sorted_traces_by_hashes(&chunk_detail.block_hashes)
} else {
bail!("invalid task")
}
}
fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result<Vec<(ChunkHash, ChunkProof)>> {
if let Some(batch_detail) = task.batch_task_detail.as_ref() {
Ok(batch_detail
.chunk_infos
.clone()
.into_iter()
.zip(batch_detail.chunk_proofs.clone())
.collect())
} else {
bail!("invalid task")
}
}
fn get_sorted_traces_by_hashes(
&self,
block_hashes: &Vec<CommonHash>,
) -> Result<Vec<BlockTrace>> {
if block_hashes.is_empty() {
bail!("blockHashes is empty")
}
let mut block_traces = Vec::new();
for hash in block_hashes {
let trace = self
.geth_client
.as_ref()
.unwrap()
.borrow_mut()
.get_block_trace_by_hash(hash)?;
block_traces.push(trace.block_trace);
}
block_traces.sort_by(|a, b| {
if get_block_number(a).is_none() {
Ordering::Less
} else if get_block_number(b).is_none() {
Ordering::Greater
} else {
get_block_number(a)
.unwrap()
.cmp(&get_block_number(b).unwrap())
}
});
let block_numbers: Vec<u64> = block_traces
.iter()
.map(|trace| match get_block_number(trace) {
Some(v) => v,
None => 0,
})
.collect();
let mut i = 0;
while i < block_numbers.len() - 1 {
if block_numbers[i] + 1 != block_numbers[i + 1] {
bail!(
"block numbers are not continuous, got {} and {}",
block_numbers[i],
block_numbers[i + 1]
)
}
i += 1;
}
Ok(block_traces)
}
}


@@ -0,0 +1,40 @@
use anyhow::{Ok, Result};
use crate::types::TaskWrapper;
use sled::{Config, Db};
pub struct TaskCache {
db: Db,
}
impl TaskCache {
pub fn new(db_path: &String) -> Result<Self> {
let config = Config::new().path(db_path);
let db = config.open()?;
Ok(Self { db })
}
pub fn put_task(&self, task_wrapper: &TaskWrapper) -> Result<()> {
let k = task_wrapper.task.id.clone().into_bytes();
let v = serde_json::to_vec(task_wrapper)?;
self.db.insert(k, v)?;
Ok(())
}
pub fn get_last_task(&self) -> Result<Option<TaskWrapper>> {
let last = self.db.last()?;
if let Some((k, v)) = last {
let kk = std::str::from_utf8(k.as_ref())?;
log::info!("get last task, task_id: {kk}");
let task_wrapper: TaskWrapper = serde_json::from_slice(v.as_ref())?;
return Ok(Some(task_wrapper));
}
Ok(None)
}
pub fn delete_task(&self, task_id: String) -> Result<()> {
let k = task_id.into_bytes();
self.db.remove(k)?;
Ok(())
}
}
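// Note: tasks are keyed by their task_id bytes and sled stores keys in
// sorted order, so get_last_task returns the lexicographically greatest
// task_id, not necessarily the most recently inserted task.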

prover_rust/src/types.rs Normal file

@@ -0,0 +1,237 @@
use core::fmt;
use eth_types::H256;
use prover::{BatchProof, ChunkHash, ChunkProof};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::coordinator_client::types::GetTaskResponseData;
pub type CommonHash = H256;
pub type Bytes = Vec<u8>;
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofType {
ProofTypeUndefined,
ProofTypeChunk,
ProofTypeBatch,
}
impl ProofType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProofType::ProofTypeChunk,
2 => ProofType::ProofTypeBatch,
_ => ProofType::ProofTypeUndefined,
}
}
}
impl Serialize for ProofType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofType::ProofTypeUndefined => serializer.serialize_i8(0),
ProofType::ProofTypeChunk => serializer.serialize_i8(1),
ProofType::ProofTypeBatch => serializer.serialize_i8(2),
}
}
}
impl<'de> Deserialize<'de> for ProofType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofType::from_u8(v))
}
}
impl Default for ProofType {
fn default() -> Self {
Self::ProofTypeUndefined
}
}
#[derive(Serialize, Deserialize)]
pub struct BatchTaskDetail {
pub chunk_infos: Vec<ChunkHash>,
pub chunk_proofs: Vec<ChunkProof>,
}
#[derive(Serialize, Deserialize)]
pub struct ChunkTaskDetail {
pub block_hashes: Vec<CommonHash>,
}
#[derive(Serialize, Deserialize, Default)]
pub struct Task {
pub uuid: String,
pub id: String,
#[serde(rename = "type", default)]
pub task_type: ProofType,
#[serde(default)]
pub batch_task_detail: Option<BatchTaskDetail>,
#[serde(default)]
pub chunk_task_detail: Option<ChunkTaskDetail>,
#[serde(default)]
pub hard_fork_name: Option<String>,
}
impl Task {
pub fn get_version(&self) -> String {
match self.hard_fork_name.as_ref() {
Some(v) => v.clone(),
None => "".to_string(),
}
}
}
impl TryFrom<&GetTaskResponseData> for Task {
type Error = serde_json::Error;
fn try_from(value: &GetTaskResponseData) -> Result<Self, Self::Error> {
let mut task = Task {
uuid: value.uuid.clone(),
id: value.task_id.clone(),
task_type: value.task_type,
chunk_task_detail: None,
batch_task_detail: None,
hard_fork_name: value.hard_fork_name.clone(),
};
match task.task_type {
ProofType::ProofTypeBatch => {
task.batch_task_detail = Some(serde_json::from_str(&value.task_data)?);
}
ProofType::ProofTypeChunk => {
task.chunk_task_detail = Some(serde_json::from_str(&value.task_data)?);
}
_ => unreachable!(),
}
Ok(task)
}
}
#[derive(Serialize, Deserialize, Default)]
pub struct TaskWrapper {
pub task: Task,
count: usize,
}
impl TaskWrapper {
pub fn increment_count(&mut self) {
self.count += 1;
}
pub fn get_count(&self) -> usize {
self.count
}
}
impl From<Task> for TaskWrapper {
fn from(task: Task) -> Self {
TaskWrapper { task, count: 0 }
}
}
#[derive(Serialize, Deserialize, Default)]
pub struct ProofDetail {
pub id: String,
#[serde(rename = "type", default)]
pub proof_type: ProofType,
pub chunk_proof: Option<ChunkProof>,
pub batch_proof: Option<BatchProof>,
pub error: String,
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofFailureType {
Undefined,
Panic,
NoPanic,
}
impl ProofFailureType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProofFailureType::Panic,
2 => ProofFailureType::NoPanic,
_ => ProofFailureType::Undefined,
}
}
}
impl Serialize for ProofFailureType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofFailureType::Undefined => serializer.serialize_u8(0),
ProofFailureType::Panic => serializer.serialize_u8(1),
ProofFailureType::NoPanic => serializer.serialize_u8(2),
}
}
}
impl<'de> Deserialize<'de> for ProofFailureType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofFailureType::from_u8(v))
}
}
impl Default for ProofFailureType {
fn default() -> Self {
Self::Undefined
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofStatus {
Ok,
Error,
}
impl ProofStatus {
fn from_u8(v: u8) -> Self {
match v {
0 => ProofStatus::Ok,
_ => ProofStatus::Error,
}
}
}
impl Serialize for ProofStatus {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofStatus::Ok => serializer.serialize_u8(0),
ProofStatus::Error => serializer.serialize_u8(1),
}
}
}
impl<'de> Deserialize<'de> for ProofStatus {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofStatus::from_u8(v))
}
}
impl Default for ProofStatus {
fn default() -> Self {
Self::Ok
}
}


@@ -0,0 +1,11 @@
use env_logger::Env;
use std::sync::Once;
static LOG_INIT: Once = Once::new();
/// Initialize log
pub fn log_init() {
LOG_INIT.call_once(|| {
env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();
});
}


@@ -0,0 +1,17 @@
use once_cell::sync::OnceCell;
static DEFAULT_COMMIT: &str = "unknown";
static VERSION: OnceCell<String> = OnceCell::new();
pub const TAG: &str = "v4.4.3";
pub const DEFAULT_ZK_VERSION: &str = "000000-000000";
fn init_version() -> String {
let commit = option_env!("GIT_REV").unwrap_or(DEFAULT_COMMIT);
let zk_version = option_env!("ZK_VERSION").unwrap_or(DEFAULT_ZK_VERSION);
format!("{TAG}-{commit}-{zk_version}")
}
pub fn get_version() -> String {
VERSION.get_or_init(init_version).clone()
}
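// For example, with GIT_REV=1a2b3c4 and ZK_VERSION=abc1234-def5678 set at
// build time (values illustrative), get_version() returns
// "v4.4.3-1a2b3c4-abc1234-def5678".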


@@ -0,0 +1,89 @@
mod base;
// mod next;
mod types;
use anyhow::Result;
use base::BaseCircuitsHandler;
use std::collections::HashMap;
use types::{BatchProof, BlockTrace, ChunkHash, ChunkProof};
use crate::types::ProofType;
// use self::next::NextCircuitsHandler;
type CircuitsVersion = String;
pub mod utils {
pub fn encode_vk(vk: Vec<u8>) -> String {
base64::encode(vk)
}
}
pub trait CircuitsHandler {
// api of zkevm::Prover
fn prover_get_vk(&self) -> Option<Vec<u8>>;
fn prover_gen_chunk_proof(
&self,
chunk_trace: Vec<BlockTrace>,
name: Option<&str>,
inner_id: Option<&str>,
output_dir: Option<&str>,
) -> Result<ChunkProof>;
// api of aggregator::Prover
fn aggregator_get_vk(&self) -> Option<Vec<u8>>;
fn aggregator_gen_agg_evm_proof(
&self,
chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
name: Option<&str>,
output_dir: Option<&str>,
) -> Result<BatchProof>;
fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool>;
}
pub struct CircuitsHandlerProvider {
proof_type: ProofType,
circuits_handler_map: HashMap<CircuitsVersion, Box<dyn CircuitsHandler>>,
}
impl CircuitsHandlerProvider {
pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
let mut m: HashMap<CircuitsVersion, Box<dyn CircuitsHandler>> = HashMap::new();
let handler = BaseCircuitsHandler::new(proof_type, params_dir, assets_dir)?;
m.insert("".to_string(), Box::new(handler));
// let next_handler: NextCircuitsHandler = NextCircuitsHandler::new(proof_type, params_dir,
// assets_dir)?; m.insert("".to_string(), Box::new(next_handler));
Ok(CircuitsHandlerProvider {
proof_type,
circuits_handler_map: m,
})
}
pub fn get_circuits_client(&self, version: String) -> Option<&Box<dyn CircuitsHandler>> {
self.circuits_handler_map.get(&version)
}
pub fn get_vks(&self) -> Vec<String> {
match self.proof_type {
ProofType::ProofTypeBatch => self
.circuits_handler_map
.values()
.map(|h| {
h.aggregator_get_vk()
.map_or("".to_string(), |vk| utils::encode_vk(vk))
})
.collect::<Vec<String>>(),
ProofType::ProofTypeChunk => self
.circuits_handler_map
.values()
.map(|h| {
h.prover_get_vk()
.map_or("".to_string(), |vk| utils::encode_vk(vk))
})
.collect::<Vec<String>>(),
_ => unreachable!(),
}
}
}
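// Note: Task::get_version() falls back to "" when the coordinator omits
// hard_fork_name, so the handler registered under the empty string is the
// de-facto default; re-enabling the commented-out NextCircuitsHandler insert
// as written would silently replace it, since both use the "" key.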


@@ -0,0 +1,91 @@
use super::{
types::{BatchProof, BlockTrace, ChunkHash, ChunkProof},
CircuitsHandler,
};
use crate::types::ProofType;
use anyhow::{bail, Ok, Result};
use prover::{aggregator::Prover as BatchProver, zkevm::Prover as ChunkProver};
use std::cell::RefCell;
#[derive(Default)]
pub struct BaseCircuitsHandler {
chunk_prover: Option<RefCell<ChunkProver>>,
batch_prover: Option<RefCell<BatchProver>>,
}
impl BaseCircuitsHandler {
pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
match proof_type {
ProofType::ProofTypeChunk => Ok(Self {
chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
..Default::default()
}),
ProofType::ProofTypeBatch => Ok(Self {
batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
..Default::default()
}),
_ => bail!("proof type invalid"),
}
}
}
impl CircuitsHandler for BaseCircuitsHandler {
// api of zkevm::Prover
fn prover_get_vk(&self) -> Option<Vec<u8>> {
log::info!("[circuit handler], [base], [chunk] get_vk");
self.chunk_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk())
}
fn prover_gen_chunk_proof(
&self,
chunk_trace: Vec<BlockTrace>,
name: Option<&str>,
inner_id: Option<&str>,
output_dir: Option<&str>,
) -> Result<ChunkProof> {
log::info!("[circuit handler], [base], [chunk] gen_chunk_proof");
if let Some(prover) = self.chunk_prover.as_ref() {
return prover
.borrow_mut()
.gen_chunk_proof(chunk_trace, name, inner_id, output_dir);
}
unreachable!("please check errors in proof_type logic")
}
// api of aggregator::Prover
fn aggregator_get_vk(&self) -> Option<Vec<u8>> {
log::info!("[circuit handler], [base], [batch] get_vk");
self.batch_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk())
}
fn aggregator_gen_agg_evm_proof(
&self,
chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
name: Option<&str>,
output_dir: Option<&str>,
) -> Result<BatchProof> {
log::info!("[circuit handler], [base], [batch] gen_agg_evm_proof");
if let Some(prover) = self.batch_prover.as_ref() {
return prover
.borrow_mut()
.gen_agg_evm_proof(chunk_hashes_proofs, name, output_dir);
}
unreachable!("please check errors in proof_type logic")
}
fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool> {
log::info!("[circuit handler], [base], [batch] check_chunk_proofs");
if let Some(prover) = self.batch_prover.as_ref() {
return Ok(prover.borrow_mut().check_chunk_proofs(chunk_proofs));
}
unreachable!("please check errors in proof_type logic")
}
}
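
The `Option<RefCell<...>>` fields deserve a note: the `CircuitsHandler` trait takes `&self`, but the underlying provers need `&mut self` to generate proofs, so the handler reaches for interior mutability. A minimal standalone sketch of that pattern, with `Worker` standing in for the real provers:

```rust
use std::cell::RefCell;

// Stand-in for zkevm::Prover / aggregator::Prover: proof generation mutates state.
struct Worker {
    calls: u32,
}

impl Worker {
    fn generate(&mut self) -> u32 {
        self.calls += 1;
        self.calls
    }
}

trait Handler {
    fn prove(&self) -> u32; // shared reference, like CircuitsHandler
}

struct BaseHandler {
    worker: Option<RefCell<Worker>>,
}

impl Handler for BaseHandler {
    fn prove(&self) -> u32 {
        // borrow_mut() panics if the cell is already borrowed; that runtime
        // check is the cost of mutating behind &self.
        self.worker
            .as_ref()
            .map(|w| w.borrow_mut().generate())
            .expect("worker not initialized")
    }
}

fn main() {
    let h = BaseHandler {
        worker: Some(RefCell::new(Worker { calls: 0 })),
    };
    assert_eq!(h.prove(), 1);
    assert_eq!(h.prove(), 2);
}
```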

View File

@@ -0,0 +1,120 @@
use super::{types::*, CircuitsHandler};
use crate::types::ProofType;
use anyhow::{bail, Ok, Result};
use prover_next::{aggregator::Prover as NextBatchProver, zkevm::Prover as NextChunkProver};
use std::cell::RefCell;
#[derive(Default)]
pub struct NextCircuitsHandler {
chunk_prover: Option<RefCell<NextChunkProver>>,
batch_prover: Option<RefCell<NextBatchProver>>,
}
impl NextCircuitsHandler {
pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
match proof_type {
ProofType::ProofTypeChunk => Ok(Self {
chunk_prover: Some(RefCell::new(NextChunkProver::from_dirs(
params_dir, assets_dir,
))),
..Default::default()
}),
ProofType::ProofTypeBatch => Ok(Self {
batch_prover: Some(RefCell::new(NextBatchProver::from_dirs(
params_dir, assets_dir,
))),
..Default::default()
}),
_ => bail!("proof type invalid"),
}
}
}
impl CircuitsHandler for NextCircuitsHandler {
// api of zkevm::Prover
fn prover_get_vk(&self) -> Option<Vec<u8>> {
log::info!("[circuit handler], [next], [chunk] get_vk");
self.chunk_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk())
}
fn prover_gen_chunk_proof(
&self,
chunk_trace: Vec<BlockTrace>,
name: Option<&str>,
inner_id: Option<&str>,
output_dir: Option<&str>,
) -> Result<ChunkProof> {
log::info!("[circuit handler], [next], [chunk] gen_chunk_proof");
if let Some(prover) = self.chunk_prover.as_ref() {
let next_chunk_trace = chunk_trace
.into_iter()
.map(block_trace_base_to_next)
.collect::<Result<Vec<NextBlockTrace>>>()?;
let next_chunk_proof = prover.borrow_mut().gen_chunk_proof(
next_chunk_trace,
name,
inner_id,
output_dir,
)?;
return chunk_proof_next_to_base(next_chunk_proof);
}
unreachable!("please check errors in proof_type logic")
}
// api of aggregator::Prover
fn aggregator_get_vk(&self) -> Option<Vec<u8>> {
log::info!("[circuit handler], [next], [batch] get_vk");
self.batch_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk())
}
fn aggregator_gen_agg_evm_proof(
&self,
chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
name: Option<&str>,
output_dir: Option<&str>,
) -> Result<BatchProof> {
log::info!("[circuit handler], [next], [batch] gen_agg_evm_proof");
if let Some(prover) = self.batch_prover.as_ref() {
let next_chunk_hashes_proofs = chunk_hashes_proofs
.into_iter()
.map(|(chunk_hash, chunk_proof)| {
let next_chunk_hash = chunk_hash_base_to_next(chunk_hash);
chunk_proof_base_to_next(&chunk_proof).map(|proof| (next_chunk_hash, proof))
})
.collect::<Result<Vec<(NextChunkHash, NextChunkProof)>>>()?;
let next_batch_proof = prover.borrow_mut().gen_agg_evm_proof(
next_chunk_hashes_proofs,
name,
output_dir,
)?;
return batch_proof_next_to_base(next_batch_proof);
}
unreachable!("please check errors in proof_type logic")
}
fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool> {
log::info!("[circuit handler], [next], [batch] check_chunk_proofs");
if let Some(prover) = self.batch_prover.as_ref() {
let next_chunk_proofs = chunk_proofs
.iter()
.map(chunk_proof_base_to_next)
.collect::<Result<Vec<NextChunkProof>>>()?;
return Ok(prover.borrow_mut().check_chunk_proofs(&next_chunk_proofs));
}
unreachable!("please check errors in proof_type logic")
}
}

View File

@@ -0,0 +1,69 @@
use anyhow::Result;
pub use prover::{BatchProof, BlockTrace, ChunkHash, ChunkProof, Proof};
// pub use prover_next::{
// BatchProof as NextBatchProof, BlockTrace as NextBlockTrace, ChunkHash as NextChunkHash,
// ChunkProof as NextChunkProof, Proof as NextProof,
// };
// pub fn chunk_proof_next_to_base(next: NextChunkProof) -> Result<ChunkProof> {
// let proof_bytes = serde_json::to_string(&next.proof)?;
// let proof: Proof = serde_json::from_str(&proof_bytes)?;
// let chunk_hash = next.chunk_hash.map(|hash| ChunkHash {
// chain_id: hash.chain_id,
// prev_state_root: hash.prev_state_root,
// post_state_root: hash.post_state_root,
// withdraw_root: hash.withdraw_root,
// data_hash: hash.data_hash,
// tx_bytes: hash.tx_bytes,
// is_padding: hash.is_padding,
// });
// Ok(ChunkProof {
// protocol: next.protocol,
// proof,
// chunk_hash,
// })
// }
// pub fn batch_proof_next_to_base(next: NextBatchProof) -> Result<BatchProof> {
// let proof_bytes = serde_json::to_string(&next)?;
// serde_json::from_str(&proof_bytes).map_err(|err| anyhow::anyhow!(err))
// }
// pub fn chunk_proof_base_to_next(base: &ChunkProof) -> Result<NextChunkProof> {
// let proof_bytes = serde_json::to_string(&base.proof)?;
// let proof: NextProof = serde_json::from_str(&proof_bytes)?;
// let chunk_hash = base.chunk_hash.clone().map(|hash| NextChunkHash {
// chain_id: hash.chain_id,
// prev_state_root: hash.prev_state_root,
// post_state_root: hash.post_state_root,
// withdraw_root: hash.withdraw_root,
// data_hash: hash.data_hash,
// tx_bytes: hash.tx_bytes,
// is_padding: hash.is_padding,
// });
// Ok(NextChunkProof {
// protocol: base.protocol.clone(),
// proof,
// chunk_hash,
// })
// }
// pub fn chunk_hash_base_to_next(base: ChunkHash) -> NextChunkHash {
// NextChunkHash {
// chain_id: base.chain_id,
// prev_state_root: base.prev_state_root,
// post_state_root: base.post_state_root,
// withdraw_root: base.withdraw_root,
// data_hash: base.data_hash,
// tx_bytes: base.tx_bytes,
// is_padding: base.is_padding,
// }
// }
// pub fn block_trace_base_to_next(base: BlockTrace) -> Result<NextBlockTrace> {
// let trace_bytes = serde_json::to_string(&base)?;
// serde_json::from_str(&trace_bytes).map_err(|err| anyhow::anyhow!(err))
// }
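
All of the commented conversion helpers above follow one strategy: the `prover` and `prover_next` crates define structurally identical types, so a value is serialized with one crate's type and deserialized as the other's. A generic sketch of that round-trip, under the same structural-compatibility assumption (field mismatches surface as `serde_json` errors):

```rust
use serde::{de::DeserializeOwned, Serialize};

// Generic form of the JSON round-trip used by the helpers above; Src and
// Dst are assumed to have serde-compatible layouts.
fn serde_round_trip<Src, Dst>(src: &Src) -> anyhow::Result<Dst>
where
    Src: Serialize,
    Dst: DeserializeOwned,
{
    let json = serde_json::to_string(src)?;
    serde_json::from_str(&json).map_err(|err| anyhow::anyhow!(err))
}

// Hypothetical usage, mirroring block_trace_base_to_next:
// let next_trace: NextBlockTrace = serde_round_trip(&base_trace)?;
```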

View File

@@ -16,7 +16,7 @@ go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
2. `solc`
Ensure you install the version of solc required by [MockBridge.sol](./mock_bridge/MockBridge.sol#L2) (e.g., 0.8.24). See https://docs.soliditylang.org/en/latest/installing-solidity.html
See https://docs.soliditylang.org/en/latest/installing-solidity.html
## Build
@@ -31,7 +31,7 @@ make rollup_bins
(Note: make sure you use different private keys for different senders in config.json.)
```bash
./build/bin/event_watcher --config ./conf/config.json
./build/bin/gas_oracle --config ./conf/config.json
./build/bin/rollup_relayer --config ./conf/config.json
./build/bin/event_watcher --config ./config.json
./build/bin/gas_oracle --config ./config.json
./build/bin/rollup_relayer --config ./config.json
```

View File

@@ -15,8 +15,7 @@
"escalate_multiple_den": 1,
"max_gas_price": 1000000000000,
"tx_type": "LegacyTx",
"check_pending_time": 1,
"min_gas_tip": 100000000
"check_pending_time": 1
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -43,8 +42,7 @@
"max_gas_price": 1000000000000,
"max_blob_gas_price": 10000000000000,
"tx_type": "DynamicFeeTx",
"check_pending_time": 1,
"min_gas_tip": 100000000
"check_pending_time": 1
},
"gas_oracle_config": {
"min_gas_price": 0,

View File

@@ -5,12 +5,12 @@ go 1.21
require (
github.com/agiledragon/gomonkey/v2 v2.11.0
github.com/consensys/gnark-crypto v0.12.1
github.com/crate-crypto/go-kzg-4844 v1.0.0
github.com/crate-crypto/go-kzg-4844 v0.7.0
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/holiman/uint256 v1.2.4
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
@@ -87,7 +87,7 @@ require (
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/scroll-tech/zktrie v0.7.1 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect

View File

@@ -43,8 +43,8 @@ github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJ
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -237,10 +237,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=

View File

@@ -26,8 +26,6 @@ type SenderConfig struct {
EscalateMultipleDen uint64 `json:"escalate_multiple_den"`
// The maximum gas price can be used to send transaction.
MaxGasPrice uint64 `json:"max_gas_price"`
// The minimum gas tip can be used to send transaction.
MinGasTip uint64 `json:"min_gas_tip"`
// The maximum blob gas price can be used to send transaction.
MaxBlobGasPrice uint64 `json:"max_blob_gas_price"`
// The transaction type to use: LegacyTx, DynamicFeeTx, BlobTx

View File

@@ -17,12 +17,6 @@ func (s *Sender) estimateLegacyGas(to *common.Address, data []byte, fallbackGasL
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
return nil, err
}
minGasTip := new(big.Int).SetUint64(s.config.MinGasTip)
if gasPrice.Cmp(minGasTip) < 0 {
gasPrice = minGasTip
}
gasLimit, _, err := s.estimateGasLimit(to, data, nil, gasPrice, nil, nil, nil)
if err != nil {
log.Error("estimateLegacyGas estimateGasLimit failure", "gas price", gasPrice, "from", s.auth.From.String(),
@@ -47,11 +41,6 @@ func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uin
return nil, err
}
minGasTip := new(big.Int).SetUint64(s.config.MinGasTip)
if gasTipCap.Cmp(minGasTip) < 0 {
gasTipCap = minGasTip
}
gasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
gasLimit, accessList, err := s.estimateGasLimit(to, data, nil, nil, gasTipCap, gasFeeCap, nil)
if err != nil {
@@ -83,11 +72,6 @@ func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *gethT
return nil, err
}
minGasTip := new(big.Int).SetUint64(s.config.MinGasTip)
if gasTipCap.Cmp(minGasTip) < 0 {
gasTipCap = minGasTip
}
gasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
blobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
gasLimit, accessList, err := s.estimateGasLimit(to, data, sidecar, nil, gasTipCap, gasFeeCap, blobGasFeeCap)

View File

@@ -618,13 +618,13 @@ func makeSidecar(blob *kzg4844.Blob) (*gethTypes.BlobTxSidecar, error) {
var commitments []kzg4844.Commitment
var proofs []kzg4844.Proof
for i := range blobs {
c, err := kzg4844.BlobToCommitment(&blobs[i])
for _, b := range blobs {
c, err := kzg4844.BlobToCommitment(b)
if err != nil {
return nil, fmt.Errorf("failed to get blob commitment, err: %w", err)
}
p, err := kzg4844.ComputeBlobProof(&blobs[i], c)
p, err := kzg4844.ComputeBlobProof(b, c)
if err != nil {
return nil, fmt.Errorf("failed to compute blob proof, err: %w", err)
}

View File

@@ -751,7 +751,7 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
pointBigInt := new(big.Int).SetBytes(pointHash.Bytes())
point := kzg4844.Point(new(big.Int).Mod(pointBigInt, blsModulo).Bytes())
commitment := sideCar.Commitments[0]
proof, claim, err := kzg4844.ComputeProof(blob, point)
proof, claim, err := kzg4844.ComputeProof(*blob, point)
assert.NoError(t, err)
var claimArray [32]byte

View File

@@ -192,69 +192,27 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
batchTimeoutSec uint64
forkBlock *big.Int
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
{
name: "NoLimitReached",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
name: "NoLimitReached",
maxChunkNum: 10,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "Timeout",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 0,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxL1CommitGasPerBatchIs0",
maxChunkNum: 10,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerBatchIs0",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxChunkNumPerBatchIs1",
maxChunkNum: 1,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitGasPerBatchIsFirstChunk",
maxChunkNum: 10,
maxL1CommitGas: 190352,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
@@ -262,8 +220,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
{
name: "ForkBlockReached",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
@@ -287,7 +243,7 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
@@ -296,7 +252,7 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
@@ -306,34 +262,32 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
HomesteadBlock: tt.forkBlock,
BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: tt.maxChunkNum,
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
HomesteadBlock: tt.forkBlock,
BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
bp.TryProposeBatch()
@@ -359,7 +313,7 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
}
}
func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) {
func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
@@ -439,86 +393,6 @@ func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) {
assert.Equal(t, uint64(6035), batches[0].TotalL1CommitCalldataSize)
}
func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
batches = batches[1:]
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Len(t, dbChunks, 2)
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
assert.Equal(t, uint64(161270), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}
func testBatchProposerBlobSizeLimit(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
@@ -549,8 +423,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
@@ -569,8 +443,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: math.MaxUint64,
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)

View File

@@ -205,8 +205,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
name string
maxBlockNum uint64
maxTxNum uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
forkBlock *big.Int
@@ -214,72 +212,42 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
{
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "Timeout",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 0,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 2,
},
{
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitGasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxBlockNumPerChunkIs1",
maxBlockNum: 1,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
@@ -289,30 +257,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
name: "MaxTxNumPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 2,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitGasPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 2522,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
@@ -322,8 +266,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
name: "MaxRowConsumptionPerChunkIs1",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
@@ -333,8 +275,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
name: "ForkBlockReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
@@ -355,14 +295,12 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxTxNumPerChunk: tt.maxTxNum,
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
@@ -399,8 +337,8 @@ func testChunkProposerCodecv1BlobSizeLimit(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,

View File

@@ -109,11 +109,10 @@ func TestFunction(t *testing.T) {
t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
t.Run("TestChunkProposerCodecv1BlobSizeLimit", testChunkProposerCodecv1BlobSizeLimit)
// Run batch proposer test cases.
// Run chunk proposer test cases.
t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)
t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits)
t.Run("TestBatchCommitGasAndCalldataSizeCodecv0Estimation", testBatchCommitGasAndCalldataSizeCodecv0Estimation)
t.Run("TestBatchCommitGasAndCalldataSizeCodecv1Estimation", testBatchCommitGasAndCalldataSizeCodecv1Estimation)
t.Run("TestBatchCommitGasAndCalldataSizeEstimation", testBatchCommitGasAndCalldataSizeEstimation)
t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit)
}

View File

@@ -76,6 +76,7 @@ type ChunkMetrics struct {
CrcMax uint64
FirstBlockTimestamp uint64
// codecv0 metrics, default 0 for codecv1
L1CommitCalldataSize uint64
L1CommitGas uint64
@@ -107,8 +108,6 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
}
return metrics, nil
case encoding.CodecV1:
metrics.L1CommitGas = codecv1.EstimateChunkL1CommitGas(chunk)
metrics.L1CommitCalldataSize = codecv1.EstimateChunkL1CommitCalldataSize(chunk)
metrics.L1CommitBlobSize, err = codecv1.EstimateChunkL1CommitBlobSize(chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
@@ -125,6 +124,7 @@ type BatchMetrics struct {
NumChunks uint64
FirstBlockTimestamp uint64
// codecv0 metrics, default 0 for codecv1
L1CommitCalldataSize uint64
L1CommitGas uint64
@@ -151,8 +151,6 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
}
return metrics, nil
case encoding.CodecV1:
metrics.L1CommitGas = codecv1.EstimateBatchL1CommitGas(batch)
metrics.L1CommitCalldataSize = codecv1.EstimateBatchL1CommitCalldataSize(batch)
metrics.L1CommitBlobSize, err = codecv1.EstimateBatchL1CommitBlobSize(batch)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)

View File

@@ -221,16 +221,16 @@ func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 1000000,
MaxL1CommitCalldataSizePerChunk: 100000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 1000000,
MaxL1CommitCalldataSizePerBatch: 100000,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: 300,
}, chainConfig, db, nil)