Compare commits


18 Commits

Author SHA1 Message Date
colin
320ab56d1d feat(codecv1): add commit gas and calldata size estimation (#1324)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-05-07 16:35:49 +08:00
georgehao
54415f6a78 feat: fix start poS failed (#1330) 2024-05-07 16:22:58 +08:00
joao
87c9c33bcc docs: update README.md to specify solc version and correct config paths (#1314)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-05-07 10:59:52 +08:00
georgehao
fdcd43a296 feat: fix lint failed (#1326) 2024-05-07 10:35:32 +08:00
lugosi
a1a7f25921 build: update golangci-lint (#1321)
Co-authored-by: kongfanfu <kongfanfu@bytedance.com>
2024-05-06 09:47:41 +08:00
JayLiu
8be70f0c80 fix start testcontainers fail bug (#1313) 2024-05-02 21:12:20 +08:00
JayLiu
d1bec53e50 test: replace l1GethContainer with poSL1Container (#1312)
Co-authored-by: TKTech660 <liujay48@gmail.com>
2024-04-30 16:25:22 +08:00
SamiAlHassan
0723b463c5 build: upgrade go-ethereum from scroll-v5.1.6 to scroll-v5.3.0 (#1304)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-30 16:25:12 +08:00
Xin.Zh
46b1ff3284 modify testcontainer (#1302)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-30 14:44:26 +08:00
alwayshang
a3635dba52 docs(coordinator): fix function name (#1297)
Signed-off-by: alwayshang <zhanghonghao@outlook.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-30 11:32:48 +08:00
colin
34ad8ca772 fix(batch-proposer): potential panic risk (#1311) 2024-04-30 08:46:13 +08:00
yanziseeker
8a2a2eb292 docs:clean repetitive words (#1309) 2024-04-28 16:42:48 +08:00
Andi
8ca89374a0 chore: remove repetitive words (#1289)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-28 14:10:21 +08:00
Xi Lin
8128526116 feat(contracts): batch token bridge (#1282) 2024-04-28 12:11:15 +08:00
Péter Garamvölgyi
9262e9af69 Reapply "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 5090b77655.
2024-04-28 11:25:10 +08:00
Péter Garamvölgyi
5090b77655 Revert "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 8f8f6eb1a1.
2024-04-28 10:54:17 +08:00
colin
4cafc9349a feat(gas-oracle): tweak gas price update logic (#1305) 2024-04-28 10:47:04 +08:00
colin
ca6f856372 docs: remove local testing image Dockerfile and docs for mac M1/M2 silicon (#1300) 2024-04-25 08:32:57 +08:00
60 changed files with 2523 additions and 453 deletions

View File

@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.1.6
L2GETH_TAG=scroll-v5.3.0
help: ## Display this help message
@grep -h \
@@ -47,11 +47,5 @@ dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app -e HOST_PATH=$(PWD) scroll_test_image
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -43,8 +43,6 @@ make dev_docker
## Testing Rollup & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
```bash
@@ -54,39 +52,6 @@ go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
### For Apple Silicon (M1/M2) Macs
To run tests on Apple Silicon Macs, build and execute the Docker image as outlined below:
#### Build a Docker Image for Testing
Use the following command to build a Docker image:
```bash
make build_test_docker
```
This command builds a Docker image named `scroll_test_image` using the Dockerfile found at `./build/dockerfiles/local_test.Dockerfile`.
#### Run Docker Image
After the image is built, run a Docker container from it:
```bash
make run_test_docker
```
This command runs a Docker container named `scroll_test_container` from the `scroll_test_image` image. The container uses the host network and has access to the Docker socket and the current directory.
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## Testing Contracts
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).

View File

@@ -104,10 +104,12 @@ linters-settings:
# minimal occurrences count to trigger, 3 by default
min-occurrences: 3
depguard:
list-type: blacklist
include-go-root: false
packages:
- github.com/davecgh/go-spew/spew
rules:
main:
files:
- $all
deny:
- pkg: "github.com/davecgh/go-spew/spew"
misspell:
# Correct spellings using locale preferences for US or UK.
# Default is to use a neutral variety of English.

View File

@@ -1,11 +0,0 @@
# Start from the latest golang base image
FROM golang:1.21
# Install Docker
RUN apt-get update && apt-get install -y docker.io docker-compose
# Set the working directory
WORKDIR /go/src/app
# This container will be executable
ENTRYPOINT [ "/bin/bash" ]

View File

@@ -15,7 +15,7 @@ import (
const (
// GolangCIVersion to be used for linting.
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2"
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2"
)
// GOBIN environment variable.
@@ -51,7 +51,7 @@ func lint() {
}
cmd = exec.Command(filepath.Join(goBin(), "golangci-lint"))
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml")
cmd.Args = append(cmd.Args, "run", "--config", "../build/.golangci.yml", "--timeout", "10m")
if *v {
cmd.Args = append(cmd.Args, "-v")

View File

@@ -1,127 +0,0 @@
package dockercompose
import (
"context"
"crypto/rand"
"fmt"
"math/big"
"os"
"path/filepath"
"time"
"github.com/cloudflare/cfssl/log"
"github.com/scroll-tech/go-ethereum/ethclient"
tc "github.com/testcontainers/testcontainers-go/modules/compose"
"github.com/testcontainers/testcontainers-go/wait"
)
// PoSL1TestEnv represents the config needed to test in PoS Layer 1.
type PoSL1TestEnv struct {
dockerComposeFile string
compose tc.ComposeStack
gethHTTPPort int
hostPath string
}
// NewPoSL1TestEnv creates and initializes a new instance of PoSL1TestEnv with a random HTTP port.
func NewPoSL1TestEnv() (*PoSL1TestEnv, error) {
rootDir, err := findProjectRootDir()
if err != nil {
return nil, fmt.Errorf("failed to find project root directory: %v", err)
}
hostPath, found := os.LookupEnv("HOST_PATH")
if !found {
hostPath = ""
}
rnd, err := rand.Int(rand.Reader, big.NewInt(65536-1024))
if err != nil {
return nil, fmt.Errorf("failed to generate a random: %v", err)
}
gethHTTPPort := int(rnd.Int64()) + 1024
if err := os.Setenv("GETH_HTTP_PORT", fmt.Sprintf("%d", gethHTTPPort)); err != nil {
return nil, fmt.Errorf("failed to set GETH_HTTP_PORT: %v", err)
}
return &PoSL1TestEnv{
dockerComposeFile: filepath.Join(rootDir, "common", "docker-compose", "l1", "docker-compose.yml"),
gethHTTPPort: gethHTTPPort,
hostPath: hostPath,
}, nil
}
// Start starts the PoS L1 test environment by running the associated Docker Compose configuration.
func (e *PoSL1TestEnv) Start() error {
var err error
e.compose, err = tc.NewDockerCompose([]string{e.dockerComposeFile}...)
if err != nil {
return fmt.Errorf("failed to create docker compose: %w", err)
}
env := map[string]string{
"GETH_HTTP_PORT": fmt.Sprintf("%d", e.gethHTTPPort),
}
if e.hostPath != "" {
env["HOST_PATH"] = e.hostPath
}
if err = e.compose.WaitForService("geth", wait.NewHTTPStrategy("/").WithPort("8545/tcp").WithStartupTimeout(15*time.Second)).WithEnv(env).Up(context.Background()); err != nil {
if errStop := e.Stop(); errStop != nil {
log.Error("failed to stop PoS L1 test environment", "err", errStop)
}
return fmt.Errorf("failed to start PoS L1 test environment: %w", err)
}
return nil
}
// Stop stops the PoS L1 test environment by stopping and removing the associated Docker Compose services.
func (e *PoSL1TestEnv) Stop() error {
if e.compose != nil {
if err := e.compose.Down(context.Background(), tc.RemoveOrphans(true), tc.RemoveVolumes(true), tc.RemoveImagesLocal); err != nil {
return fmt.Errorf("failed to stop PoS L1 test environment: %w", err)
}
}
return nil
}
// Endpoint returns the HTTP endpoint for the PoS L1 test environment.
func (e *PoSL1TestEnv) Endpoint() string {
return fmt.Sprintf("http://127.0.0.1:%d", e.gethHTTPPort)
}
// L1Client returns an ethclient by dialing the running PoS L1 test environment
func (e *PoSL1TestEnv) L1Client() (*ethclient.Client, error) {
if e == nil {
return nil, fmt.Errorf("PoS L1 test environment is not initialized")
}
client, err := ethclient.Dial(e.Endpoint())
if err != nil {
return nil, fmt.Errorf("failed to dial PoS L1 test environment: %w", err)
}
return client, nil
}
func findProjectRootDir() (string, error) {
currentDir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get working directory: %w", err)
}
for {
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
if err == nil {
return currentDir, nil
}
parentDir := filepath.Dir(currentDir)
if parentDir == currentDir {
return "", fmt.Errorf("go.work file not found in any parent directory")
}
currentDir = parentDir
}
}

View File

@@ -5,7 +5,6 @@ go 1.21
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/bits-and-blooms/bitset v1.12.0
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004
github.com/docker/docker v25.0.3+incompatible
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
@@ -14,7 +13,7 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.28.0
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
@@ -62,7 +61,7 @@ require (
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/distribution/reference v0.5.0 // indirect
@@ -183,7 +182,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.7.1 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect

View File

@@ -148,8 +148,8 @@ github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoY
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -607,10 +607,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=

View File

@@ -8,14 +8,14 @@ services:
mkdir -p /data/execution &&
cp -a /execution/* /data/execution/"
volumes:
- ${HOST_PATH:-../../..}/common/docker-compose/l1/consensus:/consensus
- ${HOST_PATH:-../../..}/common/docker-compose/l1/execution:/execution
- ../../common/testcontainers/consensus:/consensus
- ../../common/testcontainers/execution:/execution
- data:/data
# Creates a genesis state for the beacon chain using a YAML configuration file and
# a deterministic set of 64 validators.
create-beacon-chain-genesis:
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:latest"
image: "gcr.io/prysmaticlabs/prysm/cmd/prysmctl:HEAD-263557"
command:
- testnet
- generate-genesis
@@ -96,7 +96,7 @@ services:
- --nodiscover
- --syncmode=full
ports:
- ${GETH_HTTP_PORT:-8545}:8545
- 8545
depends_on:
geth-genesis:
condition: service_completed_successfully

View File

@@ -4,10 +4,13 @@ import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/compose"
"github.com/testcontainers/testcontainers-go/modules/postgres"
"github.com/testcontainers/testcontainers-go/wait"
"gorm.io/gorm"
@@ -18,8 +21,8 @@ import (
// TestcontainerApps testcontainers struct
type TestcontainerApps struct {
postgresContainer *postgres.PostgresContainer
l1GethContainer *testcontainers.DockerContainer
l2GethContainer *testcontainers.DockerContainer
poSL1Container compose.ComposeStack
// common time stamp in nanoseconds.
Timestamp int
@@ -28,6 +31,11 @@ type TestcontainerApps struct {
// NewTestcontainerApps returns new instance of TestcontainerApps struct
func NewTestcontainerApps() *TestcontainerApps {
timestamp := time.Now().Nanosecond()
// In order to solve the problem of "creating reaper failed: failed to create container"
// refer to https://github.com/testcontainers/testcontainers-go/issues/2172
if err := os.Setenv("TESTCONTAINERS_RYUK_DISABLED", "true"); err != nil {
panic("set env failed: " + err.Error())
}
return &TestcontainerApps{
Timestamp: timestamp,
}
@@ -53,33 +61,6 @@ func (t *TestcontainerApps) StartPostgresContainer() error {
return nil
}
// StartL1GethContainer starts a L1Geth container
func (t *TestcontainerApps) StartL1GethContainer() error {
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
return nil
}
req := testcontainers.ContainerRequest{
Image: "scroll_l1geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
Cmd: []string{"--log.debug", "ANY"},
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
}
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
if err != nil {
log.Printf("failed to start scroll_l1geth container: %s", err)
return err
}
t.l1GethContainer, _ = container.(*testcontainers.DockerContainer)
return nil
}
// StartL2GethContainer starts a L2Geth container
func (t *TestcontainerApps) StartL2GethContainer() error {
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
@@ -106,6 +87,55 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
return nil
}
// StartPoSL1Container starts the PoS L1 container by running the associated Docker Compose configuration
func (t *TestcontainerApps) StartPoSL1Container() error {
var (
err error
rootDir string
dockerComposeFile string
)
if rootDir, err = findProjectRootDir(); err != nil {
return fmt.Errorf("failed to find project root directory: %v", err)
}
dockerComposeFile = filepath.Join(rootDir, "common", "testcontainers", "docker-compose.yml")
if t.poSL1Container, err = compose.NewDockerCompose([]string{dockerComposeFile}...); err != nil {
return err
}
err = t.poSL1Container.WaitForService("geth", wait.NewHTTPStrategy("/").
WithPort("8545/tcp").
WithStartupTimeout(15*time.Second)).
Up(context.Background())
if err != nil {
t.poSL1Container = nil
return fmt.Errorf("failed to start PoS L1 container: %w", err)
}
return nil
}
// GetPoSL1EndPoint returns the endpoint of the running PoS L1 container
func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
if t.poSL1Container == nil {
return "", fmt.Errorf("PoS L1 container is not running")
}
contrainer, err := t.poSL1Container.ServiceContainer(context.Background(), "geth")
if err != nil {
return "", err
}
return contrainer.PortEndpoint(context.Background(), "8545/tcp", "http")
}
// GetPoSL1Client returns an ethclient by dialing the running PoS L1 container
func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
endpoint, err := t.GetPoSL1EndPoint()
if err != nil {
return nil, err
}
return ethclient.Dial(endpoint)
}
// GetDBEndPoint returns the endpoint of the running postgres container
func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
@@ -114,18 +144,6 @@ func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
}
// GetL1GethEndPoint returns the endpoint of the running L1Geth container
func (t *TestcontainerApps) GetL1GethEndPoint() (string, error) {
if t.l1GethContainer == nil || !t.l1GethContainer.IsRunning() {
return "", fmt.Errorf("l1 geth is not running")
}
endpoint, err := t.l1GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
if err != nil {
return "", err
}
return endpoint, nil
}
// GetL2GethEndPoint returns the endpoint of the running L2Geth container
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
@@ -153,19 +171,6 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
return database.InitDB(dbCfg)
}
// GetL1GethClient returns a ethclient by dialing running L1Geth
func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
endpoint, err := t.GetL1GethEndPoint()
if err != nil {
return nil, err
}
client, err := ethclient.Dial(endpoint)
if err != nil {
return nil, err
}
return client, nil
}
// GetL2GethClient returns a ethclient by dialing running L2Geth
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
endpoint, err := t.GetL2GethEndPoint()
@@ -187,14 +192,38 @@ func (t *TestcontainerApps) Free() {
log.Printf("failed to stop postgres container: %s", err)
}
}
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
if err := t.l1GethContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop scroll_l1geth container: %s", err)
}
}
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
if err := t.l2GethContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop scroll_l2geth container: %s", err)
}
}
if t.poSL1Container != nil {
if err := t.poSL1Container.Down(context.Background(), compose.RemoveOrphans(true), compose.RemoveVolumes(true), compose.RemoveImagesLocal); err != nil {
log.Printf("failed to stop PoS L1 container: %s", err)
} else {
t.poSL1Container = nil
}
}
}
// findProjectRootDir finds the project root directory
func findProjectRootDir() (string, error) {
currentDir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get working directory: %w", err)
}
for {
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
if err == nil {
return currentDir, nil
}
parentDir := filepath.Dir(currentDir)
if parentDir == currentDir {
return "", fmt.Errorf("go.work file not found in any parent directory")
}
currentDir = parentDir
}
}

View File

@@ -17,8 +17,9 @@ func TestNewTestcontainerApps(t *testing.T) {
ethclient *ethclient.Client
)
// test start testcontainers
testApps := NewTestcontainerApps()
// test start testcontainers
assert.NoError(t, testApps.StartPostgresContainer())
endpoint, err = testApps.GetDBEndPoint()
assert.NoError(t, err)
@@ -27,14 +28,6 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, gormDBclient)
assert.NoError(t, testApps.StartL1GethContainer())
endpoint, err = testApps.GetL1GethEndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetL1GethClient()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartL2GethContainer())
endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
@@ -43,17 +36,25 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartPoSL1Container())
endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetPoSL1Client()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
// test free testcontainers
testApps.Free()
endpoint, err = testApps.GetDBEndPoint()
assert.EqualError(t, err, "postgres is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetL1GethEndPoint()
assert.EqualError(t, err, "l1 geth is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetL2GethEndPoint()
assert.EqualError(t, err, "l2 geth is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetPoSL1EndPoint()
assert.EqualError(t, err, "PoS L1 container is not running")
assert.Empty(t, endpoint)
}

View File

@@ -302,8 +302,6 @@ func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
}
// EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately.
// TODO: The calculation could be more accurate by using 58 + len(l2TxDataBytes) (see Chunk).
// This needs to be adjusted in the future.
func EstimateBlockL1CommitCalldataSize(b *encoding.Block) (uint64, error) {
var size uint64
for _, txData := range b.Transactions {

View File

@@ -338,7 +338,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash,
}
// compute blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
c, err := kzg4844.BlobToCommitment(blob)
if err != nil {
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
}
@@ -364,7 +364,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash,
func makeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
// blob contains 131072 bytes but we can only utilize 31/32 of these
if len(blobBytes) > 126976 {
return nil, fmt.Errorf("oversized batch payload")
return nil, fmt.Errorf("oversized batch payload, blob bytes length: %v, max length: %v", len(blobBytes), 126976)
}
// the canonical (padded) blob payload
@@ -435,12 +435,12 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
return nil, errors.New("called BlobDataProof with empty z")
}
commitment, err := kzg4844.BlobToCommitment(*b.blob)
commitment, err := kzg4844.BlobToCommitment(b.blob)
if err != nil {
return nil, fmt.Errorf("failed to create blob commitment")
}
proof, y, err := kzg4844.ComputeProof(*b.blob, *b.z)
proof, y, err := kzg4844.ComputeProof(b.blob, *b.z)
if err != nil {
log.Crit("failed to create KZG proof at point", "err", err, "z", hex.EncodeToString(b.z[:]))
}
@@ -472,8 +472,7 @@ func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
if err != nil {
return 0, err
}
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
return paddedSize, nil
return calculatePaddedBlobSize(metadataSize + chunkDataSize), nil
}
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
@@ -487,8 +486,7 @@ func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
}
batchDataSize += chunkDataSize
}
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
return paddedSize, nil
return calculatePaddedBlobSize(metadataSize + batchDataSize), nil
}
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
@@ -506,3 +504,134 @@ func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
}
return dataSize, nil
}
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates the gas cost for computing the keccak256 hash of a given size.
func GetKeccak256Gas(size uint64) uint64 {
return GetMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
}
// GetMemoryExpansionCost calculates the cost of memory expansion for a given memoryByteSize.
func GetMemoryExpansionCost(memoryByteSize uint64) uint64 {
memorySizeWord := (memoryByteSize + 31) / 32
memoryCost := (memorySizeWord*memorySizeWord)/512 + (3 * memorySizeWord)
return memoryCost
}
// EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately.
func EstimateBlockL1CommitGas(b *encoding.Block) uint64 {
var total uint64
var numL1Messages uint64
for _, txData := range b.Transactions {
if txData.Type == types.L1MessageTxType {
numL1Messages++
continue
}
}
// 60 bytes BlockContext calldata
total += CalldataNonZeroByteGas * 60
// sload
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
// staticcall
total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
total += GetMemoryExpansionCost(36) * numL1Messages // staticcall to proxy
total += 100 * numL1Messages // read admin in proxy
total += 100 * numL1Messages // read impl in proxy
total += 100 * numL1Messages // access impl
total += GetMemoryExpansionCost(36) * numL1Messages // delegatecall to impl
return total
}
// EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately.
func EstimateChunkL1CommitCalldataSize(c *encoding.Chunk) uint64 {
return uint64(60 * len(c.Blocks))
}
// EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately.
func EstimateChunkL1CommitGas(c *encoding.Chunk) uint64 {
var totalNonSkippedL1Messages uint64
var totalL1CommitGas uint64
for _, block := range c.Blocks {
totalNonSkippedL1Messages += uint64(len(block.Transactions)) - block.NumL2Transactions()
blockL1CommitGas := EstimateBlockL1CommitGas(block)
totalL1CommitGas += blockL1CommitGas
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalNonSkippedL1Messages) // chunk hash
return totalL1CommitGas
}
// EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately.
func EstimateBatchL1CommitGas(b *encoding.Batch) uint64 {
var totalL1CommitGas uint64
// Add extra gas costs
totalL1CommitGas += 100000 // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 21000 // base fee for tx
totalL1CommitGas += CalldataNonZeroByteGas // version in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += GetKeccak256Gas(89 + 32) // parent batch header hash, length is estimated as 89 (constant part)+ 32 (1 skippedL1MessageBitmap)
totalL1CommitGas += CalldataNonZeroByteGas * (89 + 32) // parent batch header in calldata
// adjust batch data hash gas cost
totalL1CommitGas += GetKeccak256Gas(uint64(32 * len(b.Chunks)))
totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore
for _, chunk := range b.Chunks {
chunkL1CommitGas := EstimateChunkL1CommitGas(chunk)
totalL1CommitGas += chunkL1CommitGas
totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore)
totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk
totalL1CommitGas += CalldataNonZeroByteGas * (32 * (totalL1MessagePoppedInChunk + 255) / 256)
totalL1CommitGas += GetKeccak256Gas(89 + 32*(totalL1MessagePoppedInChunk+255)/256)
totalL1CommitCalldataSize := EstimateChunkL1CommitCalldataSize(chunk)
totalL1CommitGas += GetMemoryExpansionCost(totalL1CommitCalldataSize)
}
return totalL1CommitGas
}
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) uint64 {
var totalL1CommitCalldataSize uint64
for _, chunk := range b.Chunks {
totalL1CommitCalldataSize += EstimateChunkL1CommitCalldataSize(chunk)
}
return totalL1CommitCalldataSize
}
// calculatePaddedBlobSize calculates the required size on blob storage
// where every 32 bytes can store only 31 bytes of actual data, with the first byte being zero.
func calculatePaddedBlobSize(dataSize uint64) uint64 {
paddedSize := (dataSize / 31) * 32
if dataSize%31 != 0 {
paddedSize += 1 + dataSize%31 // Add 1 byte for the first empty byte plus the remainder bytes
}
return paddedSize
}
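
For readers following the new estimation helpers, here is a minimal standalone sketch (not part of the change set) that reruns the keccak/memory gas formulas and the blob padding rule above on a few hypothetical sizes:

```go
package main

import "fmt"

// getMemoryExpansionCost mirrors GetMemoryExpansionCost above: cost grows with the
// number of 32-byte memory words, with a quadratic term divided by 512.
func getMemoryExpansionCost(memoryByteSize uint64) uint64 {
	memorySizeWord := (memoryByteSize + 31) / 32
	return (memorySizeWord*memorySizeWord)/512 + 3*memorySizeWord
}

// getKeccak256Gas mirrors GetKeccak256Gas above: 30 base gas + 6 per word + memory expansion.
func getKeccak256Gas(size uint64) uint64 {
	return getMemoryExpansionCost(size) + 30 + 6*((size+31)/32)
}

// calculatePaddedBlobSize mirrors the private helper above: each 32-byte blob group
// holds 31 payload bytes behind one leading zero byte.
func calculatePaddedBlobSize(dataSize uint64) uint64 {
	paddedSize := (dataSize / 31) * 32
	if dataSize%31 != 0 {
		paddedSize += 1 + dataSize%31
	}
	return paddedSize
}

func main() {
	fmt.Println(getKeccak256Gas(58))          // 48: two words -> 30 + 6*2 + memory cost 6
	fmt.Println(getKeccak256Gas(58*10 + 32))  // e.g. hashing 10 block contexts plus one L1 message hash
	fmt.Println(calculatePaddedBlobSize(31))  // 32: exactly one full group
	fmt.Println(calculatePaddedBlobSize(100)) // 104: three full groups (96) + 1 zero byte + 7 remainder bytes
}
```

The padding rule is consistent with the `makeBlobCanonical` constraint above, where only 31 of every 32 blob bytes carry payload.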

View File

@@ -592,7 +592,7 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
actualZ := hex.EncodeToString(z[:])
assert.Equal(t, tc.expectedz, actualZ)
_, y, err := kzg4844.ComputeProof(*b, *z)
_, y, err := kzg4844.ComputeProof(b, *z)
assert.NoError(t, err)
actualY := hex.EncodeToString(y[:])
assert.Equal(t, tc.expectedy, actualY)
@@ -759,49 +759,121 @@ func TestCodecV1BatchSkipBitmap(t *testing.T) {
assert.Equal(t, 42, int(batch.TotalL1MessagePopped))
}
func TestCodecV1ChunkAndBatchBlobSizeEstimation(t *testing.T) {
func TestCodecV1ChunkAndBatchCommitBlobSizeEstimation(t *testing.T) {
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
chunk2BlobSize, err := EstimateChunkL1CommitBlobSize(chunk2)
assert.NoError(t, err)
assert.Equal(t, uint64(320), chunk2BlobSize)
assert.Equal(t, uint64(302), chunk2BlobSize)
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch2BlobSize, err := EstimateBatchL1CommitBlobSize(batch2)
assert.NoError(t, err)
assert.Equal(t, uint64(320), batch2BlobSize)
assert.Equal(t, uint64(302), batch2BlobSize)
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
chunk3BlobSize, err := EstimateChunkL1CommitBlobSize(chunk3)
assert.NoError(t, err)
assert.Equal(t, uint64(5952), chunk3BlobSize)
assert.Equal(t, uint64(5929), chunk3BlobSize)
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch3BlobSize, err := EstimateBatchL1CommitBlobSize(batch3)
assert.NoError(t, err)
assert.Equal(t, uint64(5952), batch3BlobSize)
assert.Equal(t, uint64(5929), batch3BlobSize)
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk4BlobSize, err := EstimateChunkL1CommitBlobSize(chunk4)
assert.NoError(t, err)
assert.Equal(t, uint64(128), chunk4BlobSize)
assert.Equal(t, uint64(98), chunk4BlobSize)
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch4BlobSize, err := EstimateBatchL1CommitBlobSize(batch4)
assert.NoError(t, err)
assert.Equal(t, uint64(128), batch4BlobSize)
assert.Equal(t, uint64(98), batch4BlobSize)
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
chunk5BlobSize, err := EstimateChunkL1CommitBlobSize(chunk5)
assert.NoError(t, err)
assert.Equal(t, uint64(6176), chunk5BlobSize)
assert.Equal(t, uint64(6166), chunk5BlobSize)
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk6BlobSize, err := EstimateChunkL1CommitBlobSize(chunk6)
assert.NoError(t, err)
assert.Equal(t, uint64(128), chunk6BlobSize)
assert.Equal(t, uint64(98), chunk6BlobSize)
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
batch5BlobSize, err := EstimateBatchL1CommitBlobSize(batch5)
assert.NoError(t, err)
assert.Equal(t, uint64(6208), batch5BlobSize)
assert.Equal(t, uint64(6199), batch5BlobSize)
}
func TestCodecV1ChunkAndBatchCommitCalldataSizeEstimation(t *testing.T) {
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
chunk2CalldataSize := EstimateChunkL1CommitCalldataSize(chunk2)
assert.Equal(t, uint64(60), chunk2CalldataSize)
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch2CalldataSize := EstimateBatchL1CommitCalldataSize(batch2)
assert.Equal(t, uint64(60), batch2CalldataSize)
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
chunk3CalldataSize := EstimateChunkL1CommitCalldataSize(chunk3)
assert.Equal(t, uint64(60), chunk3CalldataSize)
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch3CalldataSize := EstimateBatchL1CommitCalldataSize(batch3)
assert.Equal(t, uint64(60), batch3CalldataSize)
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk4CalldataSize := EstimateChunkL1CommitCalldataSize(chunk4)
assert.Equal(t, uint64(60), chunk4CalldataSize)
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch4BlobSize := EstimateBatchL1CommitCalldataSize(batch4)
assert.Equal(t, uint64(60), batch4BlobSize)
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
chunk5CalldataSize := EstimateChunkL1CommitCalldataSize(chunk5)
assert.Equal(t, uint64(120), chunk5CalldataSize)
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk6BlobSize := EstimateChunkL1CommitCalldataSize(chunk6)
assert.Equal(t, uint64(60), chunk6BlobSize)
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
batch5CalldataSize := EstimateBatchL1CommitCalldataSize(batch5)
assert.Equal(t, uint64(180), batch5CalldataSize)
}
func TestCodecV1ChunkAndBatchCommitGasEstimation(t *testing.T) {
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
chunk2Gas := EstimateChunkL1CommitGas(chunk2)
assert.Equal(t, uint64(2084), chunk2Gas)
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch2Gas := EstimateBatchL1CommitGas(batch2)
assert.Equal(t, uint64(158609), batch2Gas)
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
chunk3Gas := EstimateChunkL1CommitGas(chunk3)
assert.Equal(t, uint64(2084), chunk3Gas)
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch3Gas := EstimateBatchL1CommitGas(batch3)
assert.Equal(t, uint64(158609), batch3Gas)
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk4Gas := EstimateChunkL1CommitGas(chunk4)
assert.Equal(t, uint64(4705), chunk4Gas)
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch4Gas := EstimateBatchL1CommitGas(batch4)
assert.Equal(t, uint64(161262), batch4Gas)
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
chunk5Gas := EstimateChunkL1CommitGas(chunk5)
assert.Equal(t, uint64(4122), chunk5Gas)
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
chunk6Gas := EstimateChunkL1CommitGas(chunk6)
assert.Equal(t, uint64(4705), chunk6Gas)
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
batch5Gas := EstimateBatchL1CommitGas(batch5)
assert.Equal(t, uint64(165967), batch5Gas)
}
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {

View File

@@ -9,7 +9,7 @@ import (
// CheckScrollProverVersion check the "scroll-prover" version, if it's different from the local one, return false
func CheckScrollProverVersion(proverVersion string) bool {
// note the the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// note the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.1"
var tag = "v4.4.4"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -67,7 +67,7 @@ Commit a batch of transactions on layer 1.
function committedBatches(uint256) external view returns (bytes32)
```
Return the batch hash of a committed batch.
@@ -81,7 +81,7 @@ Return the batch hash of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The batch hash of a committed batch. |
### finalizeBatchWithProof
@@ -130,7 +130,7 @@ Finalize a committed batch (with blob) on layer 1.
function finalizedStateRoots(uint256) external view returns (bytes32)
```
Return the state root of a committed batch.
@@ -144,7 +144,7 @@ Return the state root of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The state root of a committed batch. |
### importGenesisBatch
@@ -160,8 +160,8 @@ Import layer 2 genesis block
| Name | Type | Description |
|---|---|---|
| _batchHeader | bytes | undefined |
| _stateRoot | bytes32 | undefined |
| _batchHeader | bytes | The header of the genesis batch. |
| _stateRoot | bytes32 | The state root of the genesis block. |
### initialize
@@ -187,7 +187,7 @@ Initialize the storage of ScrollChain.
function isBatchFinalized(uint256 _batchIndex) external view returns (bool)
```
Return whether the batch is finalized by batch index.
@@ -201,7 +201,7 @@ Return whether the batch is finalized by batch index.
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
| _0 | bool | Whether the batch is finalized by batch index. |
### isProver
@@ -253,7 +253,7 @@ Whether an account is a sequencer.
function lastFinalizedBatchIndex() external view returns (uint256)
```
The latest finalized batch index.
@@ -262,7 +262,7 @@ The latest finalized batch index.
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
| _0 | uint256 | The latest finalized batch index. |
### layer2ChainId
@@ -480,7 +480,7 @@ The address of RollupVerifier.
function withdrawRoots(uint256) external view returns (bytes32)
```
Return the message root of a committed batch.
@@ -494,7 +494,7 @@ Return the message root of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The message root of a committed batch. |

View File

@@ -11,7 +11,7 @@ import {IScrollERC20Upgradeable} from "../../libraries/token/IScrollERC20Upgrade
/// @title L2ERC20Gateway
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens on layer 2 and
/// finalize deposit the tokens from layer 1.
/// @dev The withdrawn tokens tokens will be burned directly. On finalizing deposit, the corresponding
/// @dev The withdrawn tokens will be burned directly. On finalizing deposit, the corresponding
/// tokens will be minted and transferred to the recipient.
contract L2CustomERC20Gateway is L2ERC20Gateway {
/**********

View File

@@ -0,0 +1,35 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
library BatchBridgeCodec {
/// @dev Encode the `token` and `batchIndex` to single `bytes32`.
function encodeInitialNode(address token, uint64 batchIndex) internal pure returns (bytes32 node) {
assembly {
node := add(shl(96, token), batchIndex)
}
}
/// @dev Encode the `sender` and `amount` to single `bytes32`.
function encodeNode(address sender, uint96 amount) internal pure returns (bytes32 node) {
assembly {
node := add(shl(96, sender), amount)
}
}
/// @dev Decode `bytes32` `node` to `receiver` and `amount`.
function decodeNode(bytes32 node) internal pure returns (address receiver, uint256 amount) {
receiver = address(uint160(uint256(node) >> 96));
amount = uint256(node) & 0xffffffffffffffffffffffff;
}
/// @dev Compute `keccak256(concat(a, b))`.
function hash(bytes32 a, bytes32 b) internal pure returns (bytes32 value) {
// solhint-disable-next-line no-inline-assembly
assembly {
mstore(0x00, a)
mstore(0x20, b)
value := keccak256(0x00, 0x40)
}
}
}
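
As a rough illustration of the codec above, the following hypothetical off-chain Go sketch (not part of the change set, assuming `golang.org/x/crypto/sha3`) reproduces the same node packing and keccak hash chaining that `L1BatchBridgeGateway` later accumulates per batch: an initial node from the token address and batch index, then one keccak per deposit.

```go
package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/sha3"
)

// encodeInitialNode packs the token address into the high 160 bits and the batch
// index into the low 96 bits, mirroring BatchBridgeCodec.encodeInitialNode.
func encodeInitialNode(token [20]byte, batchIndex uint64) [32]byte {
	var node [32]byte
	copy(node[:20], token[:])
	new(big.Int).SetUint64(batchIndex).FillBytes(node[20:])
	return node
}

// encodeNode packs the sender address and a uint96 amount, mirroring BatchBridgeCodec.encodeNode.
func encodeNode(sender [20]byte, amount *big.Int) [32]byte {
	var node [32]byte
	copy(node[:20], sender[:])
	amount.FillBytes(node[20:])
	return node
}

// hashPair computes keccak256(a || b), mirroring BatchBridgeCodec.hash.
func hashPair(a, b [32]byte) [32]byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(a[:])
	h.Write(b[:])
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	// hypothetical token, senders, and batch index
	var token, alice, bob [20]byte
	token[19], alice[19], bob[19] = 0x01, 0x02, 0x03

	// hash chain: start from the initial node, then fold in one node per deposit
	acc := hashPair(encodeInitialNode(token, 7), encodeNode(alice, big.NewInt(1e18)))
	acc = hashPair(acc, encodeNode(bob, big.NewInt(2e18)))
	fmt.Printf("batch hash: %x\n", acc)
}
```

Decoding reverses the packing: the receiver is the top 160 bits of the node and the amount the low 96 bits, as `decodeNode` shows.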

View File

@@ -0,0 +1,425 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {AccessControlEnumerableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/AccessControlEnumerableUpgradeable.sol";
import {ReentrancyGuardUpgradeable} from "@openzeppelin/contracts-upgradeable/security/ReentrancyGuardUpgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {AddressUpgradeable} from "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol";
import {IL1ERC20Gateway} from "../L1/gateways/IL1ERC20Gateway.sol";
import {IL1GatewayRouter} from "../L1/gateways/IL1GatewayRouter.sol";
import {IL1MessageQueue} from "../L1/rollup/IL1MessageQueue.sol";
import {IL1ScrollMessenger} from "../L1/IL1ScrollMessenger.sol";
import {BatchBridgeCodec} from "./BatchBridgeCodec.sol";
import {L2BatchBridgeGateway} from "./L2BatchBridgeGateway.sol";
/// @title L1BatchBridgeGateway
contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyGuardUpgradeable {
using SafeERC20Upgradeable for IERC20Upgradeable;
/**********
* Events *
**********/
/// @notice Emitted when a user deposits tokens to this contract.
/// @param sender The address of token sender.
/// @param token The address of deposited token.
/// @param batchIndex The batch index of current deposit.
/// @param amount The amount of token deposited (including fee).
/// @param fee The amount of fee charged.
event Deposit(
address indexed sender,
address indexed token,
uint256 indexed batchIndex,
uint256 amount,
uint256 fee
);
/// @notice Emitted when a batch deposit is initiated.
/// @param caller The address of the caller who initiated the deposit.
/// @param l1Token The address of the token in L1 to deposit.
/// @param batchIndex The index of current batch deposit.
/// @param l2Token The address of the corresponding token in L2.
event BatchDeposit(address indexed caller, address indexed l1Token, uint256 indexed batchIndex, address l2Token);
/**********
* Errors *
**********/
/// @dev Thrown when caller is not `messenger`.
error ErrorCallerNotMessenger();
/// @dev Thrown when the deposited amount is smaller than `minAmountPerTx`.
error ErrorDepositAmountTooSmall();
/// @dev Thrown when users try to deposit ETH with `depositERC20` method.
error ErrorIncorrectMethodForETHDeposit();
/// @dev Thrown when the `msg.value` is not enough for batch deposit fee.
error ErrorInsufficientMsgValueForBatchDepositFee();
/// @dev Thrown when the given new batch config is invalid.
error ErrorInvalidBatchConfig();
/// @dev Thrown when no pending batch exists.
error ErrorNoPendingBatch();
/// @dev Thrown when user deposits unsupported tokens.
error ErrorTokenNotSupported();
/// @dev Thrown when ETH transfer failed.
error ErrorTransferETHFailed();
/*************
* Constants *
*************/
/// @notice The role for batch deposit keeper.
bytes32 public constant KEEPER_ROLE = keccak256("KEEPER_ROLE");
/// @notice The safe gas limit for batch bridge.
uint256 private constant SAFE_BATCH_BRIDGE_GAS_LIMIT = 200000;
/// @notice The address of corresponding `L2BatchDepositGateway` contract.
address public immutable counterpart;
/// @notice The address of `L1GatewayRouter` contract.
address public immutable router;
/// @notice The address of `L1ScrollMessenger` contract.
address public immutable messenger;
/// @notice The address of `L1MessageQueue` contract.
address public immutable queue;
/***********
* Structs *
***********/
/// @notice The config for batch token bridge.
/// @dev Compiler will pack this into a single `bytes32`.
/// @param feeAmountPerTx The amount of fee charged for each deposit.
/// @param minAmountPerTx The minimum amount of token for each deposit.
/// @param maxTxsPerBatch The maximum number of deposits in each batch.
/// @param maxDelayPerBatch The maximum number of seconds to wait in each batch.
/// @param safeBridgeGasLimit The safe bridge gas limit for bridging token from L1 to L2.
struct BatchConfig {
uint96 feeAmountPerTx;
uint96 minAmountPerTx;
uint16 maxTxsPerBatch;
uint24 maxDelayPerBatch;
uint24 safeBridgeGasLimit;
}
/// @dev Compiler will pack this into two `bytes32`.
/// @param amount The total amount of token to deposit in current batch.
/// @param startTime The timestamp of the first deposit.
/// @param numDeposits The total number of deposits in current batch.
/// @param hash The hash of current batch.
/// Suppose there are `n` deposits in current batch with `senders` and `amounts`. The hash is computed as
/// ```text
/// hash[0] = concat(token, batch_index)
/// hash[i] = keccak(hash[i-1], concat(senders[i], amounts[i]))
/// ```
/// The type of `token` and `senders` is `address`, while the type of `batch_index` and `amounts[i]` is `uint96`.
/// Computed this way, the hash of each batch should be distinct across all tokens.
struct BatchState {
uint128 amount;
uint64 startTime;
uint64 numDeposits;
bytes32 hash;
}
/// @dev Compiler will pack this into a single `bytes32`.
/// @param pending The total amount of token pending to bridge.
/// @param currentBatchIndex The index of current batch.
/// @param pendingBatchIndex The index of pending batch (next batch to bridge).
struct TokenState {
uint128 pending;
uint64 currentBatchIndex;
uint64 pendingBatchIndex;
}
/*************
* Variables *
*************/
/// @notice Mapping from token address to batch bridge config.
/// @dev The `address(0)` is used for ETH.
mapping(address => BatchConfig) public configs;
/// @notice Mapping from token address to batch index to batch state.
/// @dev The `address(0)` is used for ETH.
mapping(address => mapping(uint256 => BatchState)) public batches;
/// @notice Mapping from token address to token state.
/// @dev The `address(0)` is used for ETH.
mapping(address => TokenState) public tokens;
/// @notice The address of fee vault.
address public feeVault;
/***************
* Constructor *
***************/
/// @param _counterpart The address of `L2BatchDepositGateway` contract in L2.
/// @param _router The address of `L1GatewayRouter` contract in L1.
/// @param _messenger The address of `L1ScrollMessenger` contract in L1.
/// @param _queue The address of `L1MessageQueue` contract in L1.
constructor(
address _counterpart,
address _router,
address _messenger,
address _queue
) {
_disableInitializers();
counterpart = _counterpart;
router = _router;
messenger = _messenger;
queue = _queue;
}
/// @notice Initialize the storage of `L1BatchDepositGateway`.
/// @param _feeVault The address of fee vault contract.
function initialize(address _feeVault) external initializer {
__Context_init(); // from ContextUpgradeable
__ERC165_init(); // from ERC165Upgradeable
__AccessControl_init(); // from AccessControlUpgradeable
__AccessControlEnumerable_init(); // from AccessControlEnumerableUpgradeable
__ReentrancyGuard_init(); // from ReentrancyGuardUpgradeable
feeVault = _feeVault;
_grantRole(DEFAULT_ADMIN_ROLE, _msgSender());
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Receive refunded ETH from `L1ScrollMessenger`.
receive() external payable {
if (_msgSender() != messenger) {
revert ErrorCallerNotMessenger();
}
}
/// @notice Deposit ETH.
function depositETH() external payable {
// no safe cast check here, since no one has so much ETH yet.
_deposit(address(0), _msgSender(), uint96(msg.value));
}
/// @notice Deposit ERC20 token.
///
/// @param token The address of token.
/// @param amount The amount of token to deposit. We use type `uint96`, since it is enough for most of the major tokens.
function depositERC20(address token, uint96 amount) external {
if (token == address(0)) revert ErrorIncorrectMethodForETHDeposit();
// common practice to handle fee on transfer token.
uint256 beforeBalance = IERC20Upgradeable(token).balanceOf(address(this));
IERC20Upgradeable(token).safeTransferFrom(_msgSender(), address(this), amount);
amount = uint96(IERC20Upgradeable(token).balanceOf(address(this)) - beforeBalance);
_deposit(token, _msgSender(), amount);
}
/************************
* Restricted Functions *
************************/
/// @notice Add or update the batch bridge config for the given token.
///
/// @dev The caller should make sure `safeBridgeGasLimit` is enough for batch bridging.
///
/// @param token The address of token to update.
/// @param newConfig The new config.
function setBatchConfig(address token, BatchConfig memory newConfig) external onlyRole(DEFAULT_ADMIN_ROLE) {
if (
newConfig.maxTxsPerBatch == 0 ||
newConfig.maxDelayPerBatch == 0 ||
newConfig.feeAmountPerTx > newConfig.minAmountPerTx
) {
revert ErrorInvalidBatchConfig();
}
configs[token] = newConfig;
}
/// @notice Initiate the batch bridge of current pending batch.
/// @param token The address of the token.
function executeBatchDeposit(address token) external payable onlyRole(KEEPER_ROLE) {
BatchConfig memory cachedBatchConfig = configs[token];
TokenState memory cachedTokenState = tokens[token];
_tryFinalizeCurrentBatch(token, cachedBatchConfig, cachedTokenState);
// no batch to bridge
if (cachedTokenState.currentBatchIndex == cachedTokenState.pendingBatchIndex) {
revert ErrorNoPendingBatch();
}
// check bridge fee
uint256 depositFee = IL1MessageQueue(queue).estimateCrossDomainMessageFee(cachedBatchConfig.safeBridgeGasLimit);
uint256 batchBridgeFee = IL1MessageQueue(queue).estimateCrossDomainMessageFee(SAFE_BATCH_BRIDGE_GAS_LIMIT);
if (msg.value < depositFee + batchBridgeFee) {
revert ErrorInsufficientMsgValueForBatchDepositFee();
}
// take accumulated fee to fee vault
uint256 accumulatedFee;
if (token == address(0)) {
// no uncheck here just in case
accumulatedFee = address(this).balance - msg.value - cachedTokenState.pending;
} else {
// no uncheck here just in case
accumulatedFee = IERC20Upgradeable(token).balanceOf(address(this)) - cachedTokenState.pending;
}
if (accumulatedFee > 0) {
_transferToken(token, feeVault, accumulatedFee);
}
// deposit token to L2
BatchState memory cachedBatchState = batches[token][cachedTokenState.pendingBatchIndex];
address l2Token;
if (token == address(0)) {
IL1ScrollMessenger(messenger).sendMessage{value: cachedBatchState.amount + depositFee}(
counterpart,
cachedBatchState.amount,
new bytes(0),
cachedBatchConfig.safeBridgeGasLimit
);
} else {
address gateway = IL1GatewayRouter(router).getERC20Gateway(token);
l2Token = IL1ERC20Gateway(gateway).getL2ERC20Address(token);
IERC20Upgradeable(token).safeApprove(gateway, 0);
IERC20Upgradeable(token).safeApprove(gateway, cachedBatchState.amount);
IL1ERC20Gateway(gateway).depositERC20{value: depositFee}(
token,
counterpart,
cachedBatchState.amount,
cachedBatchConfig.safeBridgeGasLimit
);
}
// notify `L2BatchBridgeGateway`
IL1ScrollMessenger(messenger).sendMessage{value: batchBridgeFee}(
counterpart,
0,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(token, l2Token, cachedTokenState.pendingBatchIndex, cachedBatchState.hash)
),
SAFE_BATCH_BRIDGE_GAS_LIMIT
);
emit BatchDeposit(_msgSender(), token, cachedTokenState.pendingBatchIndex, l2Token);
// update token state
unchecked {
cachedTokenState.pending -= uint128(cachedBatchState.amount);
cachedTokenState.pendingBatchIndex += 1;
}
tokens[token] = cachedTokenState;
// refund keeper fee
unchecked {
if (msg.value > depositFee + batchBridgeFee) {
_transferToken(address(0), _msgSender(), msg.value - depositFee - batchBridgeFee);
}
}
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to deposit token.
/// @param token The address of token to deposit.
/// @param sender The address of token sender.
/// @param amount The amount of token to deposit.
function _deposit(
address token,
address sender,
uint96 amount
) internal nonReentrant {
BatchConfig memory cachedBatchConfig = configs[token];
TokenState memory cachedTokenState = tokens[token];
_tryFinalizeCurrentBatch(token, cachedBatchConfig, cachedTokenState);
BatchState memory cachedBatchState = batches[token][cachedTokenState.currentBatchIndex];
if (amount < cachedBatchConfig.minAmountPerTx) {
revert ErrorDepositAmountTooSmall();
}
emit Deposit(sender, token, cachedTokenState.currentBatchIndex, amount, cachedBatchConfig.feeAmountPerTx);
// deduct fee and update cached state
unchecked {
amount -= cachedBatchConfig.feeAmountPerTx;
cachedTokenState.pending += amount;
cachedBatchState.amount += amount;
cachedBatchState.numDeposits += 1;
}
// compute the hash chain
bytes32 node = BatchBridgeCodec.encodeNode(sender, amount);
if (cachedBatchState.hash == bytes32(0)) {
bytes32 initialNode = BatchBridgeCodec.encodeInitialNode(token, cachedTokenState.currentBatchIndex);
// this is first tx in this batch
cachedBatchState.hash = BatchBridgeCodec.hash(initialNode, node);
cachedBatchState.startTime = uint64(block.timestamp);
} else {
cachedBatchState.hash = BatchBridgeCodec.hash(cachedBatchState.hash, node);
}
batches[token][cachedTokenState.currentBatchIndex] = cachedBatchState;
tokens[token] = cachedTokenState;
}
/// @dev Internal function to finalize current batch.
/// This function may change the value of `cachedTokenState`, which can be used in later operations.
/// @param token The address of token to finalize.
/// @param cachedBatchConfig The cached batch config in memory.
/// @param cachedTokenState The cached token state in memory.
function _tryFinalizeCurrentBatch(
address token,
BatchConfig memory cachedBatchConfig,
TokenState memory cachedTokenState
) internal view {
if (cachedBatchConfig.maxTxsPerBatch == 0) {
revert ErrorTokenNotSupported();
}
BatchState memory cachedBatchState = batches[token][cachedTokenState.currentBatchIndex];
// return if it is the very first deposit in the current batch
if (cachedBatchState.numDeposits == 0) return;
// finalize current batchIndex when `maxTxsPerBatch` or `maxDelayPerBatch` reached.
if (
cachedBatchState.numDeposits == cachedBatchConfig.maxTxsPerBatch ||
block.timestamp - cachedBatchState.startTime > cachedBatchConfig.maxDelayPerBatch
) {
cachedTokenState.currentBatchIndex += 1;
}
}
/// @dev Internal function to transfer token, including ETH.
/// @param token The address of token.
/// @param receiver The address of token receiver.
/// @param amount The amount of token to transfer.
function _transferToken(
address token,
address receiver,
uint256 amount
) private {
if (token == address(0)) {
(bool success, ) = receiver.call{value: amount}("");
if (!success) revert ErrorTransferETHFailed();
} else {
IERC20Upgradeable(token).safeTransfer(receiver, amount);
}
}
}
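The `_deposit` path above folds every deposit into a running hash chain through `BatchBridgeCodec`, and the L2 gateway later recomputes the same chain in `distribute` from the raw node list. The real `BatchBridgeCodec.sol` is not shown in this diff; the following is only a minimal sketch of a codec with the same interface, assuming a 160-bit address / 96-bit amount packing, so the actual library may well differ.
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
/// Hypothetical codec sketch (not the repo's BatchBridgeCodec); layout is an assumption for illustration only.
library BatchBridgeCodecSketch {
    /// Encode the (token, batchIndex) pair that seeds a batch's hash chain.
    function encodeInitialNode(address token, uint64 batchIndex) internal pure returns (bytes32) {
        return bytes32((uint256(uint160(token)) << 96) | batchIndex);
    }
    /// Encode one deposit as a single 32-byte node: sender in the high 160 bits, amount (uint96) in the low 96 bits.
    function encodeNode(address sender, uint96 amount) internal pure returns (bytes32) {
        return bytes32((uint256(uint160(sender)) << 96) | amount);
    }
    /// Decode a node back into (receiver, amount); inverse of encodeNode.
    function decodeNode(bytes32 node) internal pure returns (address receiver, uint256 amount) {
        receiver = address(uint160(uint256(node) >> 96));
        amount = uint256(node) & type(uint96).max;
    }
    /// Fold the next node into the running hash chain.
    function hash(bytes32 acc, bytes32 node) internal pure returns (bytes32) {
        return keccak256(abi.encodePacked(acc, node));
    }
}
If the real codec packs the fields differently, only this sketch changes; the on-chain comparison of the accumulated hash against the committed batch hash works the same way.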

View File

@@ -0,0 +1,246 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {AccessControlEnumerableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/AccessControlEnumerableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {IL2ScrollMessenger} from "../L2/IL2ScrollMessenger.sol";
import {BatchBridgeCodec} from "./BatchBridgeCodec.sol";
/// @title L2BatchBridgeGateway
contract L2BatchBridgeGateway is AccessControlEnumerableUpgradeable {
/**********
* Events *
**********/
/// @notice Emitted when token mapping for ERC20 token is updated.
/// @param l2Token The address of corresponding ERC20 token in layer 2.
/// @param oldL1Token The address of the old corresponding ERC20 token in layer 1.
/// @param newL1Token The address of the new corresponding ERC20 token in layer 1.
event UpdateTokenMapping(address indexed l2Token, address indexed oldL1Token, address indexed newL1Token);
/// @notice Emitted when batch bridge is finalized.
/// @param l1Token The address of token in L1.
/// @param l2Token The address of token in L2.
/// @param batchIndex The index of batch finalized.
event FinalizeBatchDeposit(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
/// @notice Emitted when batch distribution finished.
/// @param l1Token The address of token in L1.
/// @param l2Token The address of token in L2.
/// @param batchIndex The index of batch distributed.
event BatchDistribute(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
/// @notice Emitted when a token distribution failed.
/// @param l2Token The address of token in L2.
/// @param batchIndex The index of the batch.
/// @param receiver The address of token receiver.
/// @param amount The amount of token to distribute.
event DistributeFailed(address indexed l2Token, uint256 indexed batchIndex, address receiver, uint256 amount);
/**********
* Errors *
**********/
/// @dev Thrown when caller is not `messenger`.
error ErrorCallerNotMessenger();
/// @dev Thrown when the L1 token mapping mismatches the one passed to `finalizeBatchBridge`.
error ErrorL1TokenMismatched();
/// @dev Thrown when message sender is not `counterpart`.
error ErrorMessageSenderNotCounterpart();
/// @dev Thrown when no failed distribution exists.
error ErrorNoFailedDistribution();
/// @dev Thrown when the batch hash mismatches.
error ErrorBatchHashMismatch();
/// @dev Thrown when the batch has already been distributed.
error ErrorBatchDistributed();
/*************
* Constants *
*************/
/// @notice The role for batch deposit keeper.
bytes32 public constant KEEPER_ROLE = keccak256("KEEPER_ROLE");
/// @notice The safe gas limit for ETH transfer
uint256 private constant SAFE_ETH_TRANSFER_GAS_LIMIT = 50000;
/// @notice The address of corresponding `L1BatchBridgeGateway` contract.
address public immutable counterpart;
/// @notice The address of corresponding `L2ScrollMessenger` contract.
address public immutable messenger;
/*************
* Variables *
*************/
/// @notice Mapping from l2 token address to l1 token address.
mapping(address => address) public tokenMapping;
/// @notice Mapping from L2 token address to batch index to batch hash.
mapping(address => mapping(uint256 => bytes32)) public batchHashes;
/// @notice Mapping from token address to the amount of failed distribution.
mapping(address => uint256) public failedAmount;
/// @notice Mapping from batch hash to the distribute status.
mapping(bytes32 => bool) public isDistributed;
/*************
* Modifiers *
*************/
modifier onlyMessenger() {
if (_msgSender() != messenger) {
revert ErrorCallerNotMessenger();
}
_;
}
/***************
* Constructor *
***************/
/// @param _counterpart The address of `L1BatchBridgeGateway` contract in L1.
/// @param _messenger The address of `L2ScrollMessenger` contract in L2.
constructor(address _counterpart, address _messenger) {
_disableInitializers();
counterpart = _counterpart;
messenger = _messenger;
}
/// @notice Initialize the storage of `L2BatchBridgeGateway`.
function initialize() external initializer {
__Context_init(); // from ContextUpgradeable
__ERC165_init(); // from ERC165Upgradeable
__AccessControl_init(); // from AccessControlUpgradeable
__AccessControlEnumerable_init(); // from AccessControlEnumerableUpgradeable
_grantRole(DEFAULT_ADMIN_ROLE, _msgSender());
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Receive batch bridged ETH from `L2ScrollMessenger`.
receive() external payable onlyMessenger {
// empty
}
/// @notice Finalize L1 initiated batch token deposit.
/// @param l1Token The address of the token in L1.
/// @param l2Token The address of the token in L2.
/// @param batchIndex The index of this batch bridge.
/// @param hash The hash of this batch.
function finalizeBatchDeposit(
address l1Token,
address l2Token,
uint256 batchIndex,
bytes32 hash
) external onlyMessenger {
if (counterpart != IL2ScrollMessenger(messenger).xDomainMessageSender()) {
revert ErrorMessageSenderNotCounterpart();
}
// trust the messenger and update `tokenMapping` on the first call
// another assumption is that this function should never fail due to out of gas
address storedL1Token = tokenMapping[l2Token];
if (storedL1Token == address(0) && l1Token != address(0)) {
tokenMapping[l2Token] = l1Token;
} else if (storedL1Token != l1Token) {
// this usually won't happen, check just in case.
revert ErrorL1TokenMismatched();
}
batchHashes[l2Token][batchIndex] = hash;
emit FinalizeBatchDeposit(l1Token, l2Token, batchIndex);
}
/************************
* Restricted Functions *
************************/
/// @notice Withdraw tokens from failed distributions.
/// @param token The address of token to withdraw.
/// @param receiver The address of token receiver.
function withdrawFailedAmount(address token, address receiver) external onlyRole(DEFAULT_ADMIN_ROLE) {
uint256 amount = failedAmount[token];
if (amount == 0) revert ErrorNoFailedDistribution();
failedAmount[token] = 0;
_transferToken(token, receiver, amount);
}
/// @notice Distribute deposited token to corresponding receivers.
/// @param l2Token The address of L2 token.
/// @param batchIndex The index of batch to distribute.
/// @param nodes The list of encoded L1 deposits.
function distribute(
address l2Token,
uint64 batchIndex,
bytes32[] memory nodes
) external onlyRole(KEEPER_ROLE) {
address l1Token = tokenMapping[l2Token];
bytes32 hash = BatchBridgeCodec.encodeInitialNode(l1Token, batchIndex);
for (uint256 i = 0; i < nodes.length; i++) {
hash = BatchBridgeCodec.hash(hash, nodes[i]);
}
if (batchHashes[l2Token][batchIndex] != hash) {
revert ErrorBatchHashMismatch();
}
if (isDistributed[hash]) {
revert ErrorBatchDistributed();
}
isDistributed[hash] = true;
// do the transfer and allow failure to avoid a DDoS attack
for (uint256 i = 0; i < nodes.length; i++) {
(address receiver, uint256 amount) = BatchBridgeCodec.decodeNode(nodes[i]);
if (!_transferToken(l2Token, receiver, amount)) {
failedAmount[l2Token] += amount;
emit DistributeFailed(l2Token, batchIndex, receiver, amount);
}
}
emit BatchDistribute(l1Token, l2Token, batchIndex);
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to transfer token, including ETH.
/// @param token The address of token.
/// @param receiver The address of token receiver.
/// @param amount The amount of token to transfer.
/// @return success Whether the transfer is successful.
function _transferToken(
address token,
address receiver,
uint256 amount
) private returns (bool success) {
if (token == address(0)) {
// We add a gas limit here to avoid DDoS from a malicious receiver.
(success, ) = receiver.call{value: amount, gas: SAFE_ETH_TRANSFER_GAS_LIMIT}("");
} else {
// We perform a low-level call here to bypass Solidity's return data size checking mechanism.
// Normally, the token is selected such that the call would not revert unless out of gas.
bytes memory returnData;
(success, returnData) = token.call(abi.encodeCall(IERC20Upgradeable.transfer, (receiver, amount)));
if (success && returnData.length > 0) {
success = abi.decode(returnData, (bool));
}
}
}
}
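Taken together, the two gateways form a commit-and-reveal flow: L1 commits only the final chain hash through the messenger, and a keeper replays the individual nodes on L2. The keeper rebuilds the node list off-chain, in deposit order (for example from the L1 `Deposit` events), and submits it to `distribute`. The sketch below assumes the hypothetical codec above and a contract that has been granted `KEEPER_ROLE`; it is not the production keeper.
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {L2BatchBridgeGateway} from "./L2BatchBridgeGateway.sol";
/// Hypothetical keeper helper for illustration; `receivers`/`amounts` are assumed to come from
/// the keeper's off-chain indexing of L1 Deposit events, in the original deposit order.
contract KeeperDistributeSketch {
    L2BatchBridgeGateway public immutable gateway;
    constructor(L2BatchBridgeGateway _gateway) {
        gateway = _gateway;
    }
    /// Rebuild the nodes and ask the gateway to pay out one finalized batch.
    function distributeBatch(
        address l2Token,
        uint64 batchIndex,
        address[] calldata receivers,
        uint96[] calldata amounts
    ) external {
        bytes32[] memory nodes = new bytes32[](receivers.length);
        for (uint256 i = 0; i < receivers.length; i++) {
            nodes[i] = BatchBridgeCodecSketch.encodeNode(receivers[i], amounts[i]);
        }
        gateway.distribute(l2Token, batchIndex, nodes);
    }
}
The test helper `_encodeNodes` further down in this diff performs the same reconstruction against the repo's actual codec.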

View File

@@ -79,7 +79,7 @@ library BatchHeaderV0Codec {
/// @notice Get the number of L1 messages popped before this batch.
/// @param batchPtr The start memory offset of the batch header in memory.
/// @return _totalL1MessagePopped The the number of L1 messages popped before this batch.
/// @return _totalL1MessagePopped The number of L1 messages popped before this batch.
function getTotalL1MessagePopped(uint256 batchPtr) internal pure returns (uint256 _totalL1MessagePopped) {
assembly {
_totalL1MessagePopped := shr(192, mload(add(batchPtr, 17)))

View File

@@ -79,7 +79,7 @@ library BatchHeaderV1Codec {
/// @notice Get the number of L1 messages popped before this batch.
/// @param batchPtr The start memory offset of the batch header in memory.
/// @return _totalL1MessagePopped The the number of L1 messages popped before this batch.
/// @return _totalL1MessagePopped The number of L1 messages popped before this batch.
function getTotalL1MessagePopped(uint256 batchPtr) internal pure returns (uint256 _totalL1MessagePopped) {
assembly {
_totalL1MessagePopped := shr(192, mload(add(batchPtr, 17)))

View File

@@ -98,7 +98,7 @@ According to the Scroll documentation, `L1ScrollMessenger`:
This contract is central in the L2-to-L1 communication process since all messages from L2 that are verified by the zkevm proof are executed on behalf of this contract.
In case of a vulnerability in the `L1ScrollMessenger`, which allows the attacker to send arbitrary messages bypassing the the zkevm proof, an attacker can immediately drain tokens from the L1 bridge.
In case of a vulnerability in the `L1ScrollMessenger`, which allows the attacker to send arbitrary messages bypassing the zkevm proof, an attacker can immediately drain tokens from the L1 bridge.
The upgradeability of the `L1ScrollMessenger` creates additional risk. There is a risk of an attack that replaces the implementation with some malicious functionality. Such an attack might reduce to the vulnerability above and steal all tokens locked on the L1 bridge.

View File

@@ -0,0 +1,634 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
import {ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
import {Strings} from "@openzeppelin/contracts/utils/Strings.sol";
import {L1BatchBridgeGateway} from "../../batch-bridge/L1BatchBridgeGateway.sol";
import {L2BatchBridgeGateway} from "../../batch-bridge/L2BatchBridgeGateway.sol";
import {BatchBridgeCodec} from "../../batch-bridge/BatchBridgeCodec.sol";
import {IL1ERC20Gateway, L1CustomERC20Gateway} from "../../L1/gateways/L1CustomERC20Gateway.sol";
import {L1GatewayRouter} from "../../L1/gateways/L1GatewayRouter.sol";
import {IL2ERC20Gateway, L2CustomERC20Gateway} from "../../L2/gateways/L2CustomERC20Gateway.sol";
import {AddressAliasHelper} from "../../libraries/common/AddressAliasHelper.sol";
import {ScrollConstants} from "../../libraries/constants/ScrollConstants.sol";
import {L1GatewayTestBase} from "../L1GatewayTestBase.t.sol";
contract L1BatchBridgeGatewayTest is L1GatewayTestBase {
event Deposit(
address indexed sender,
address indexed token,
uint256 indexed batchIndex,
uint256 amount,
uint256 fee
);
event BatchDeposit(address indexed caller, address indexed l1Token, uint256 indexed batchIndex, address l2Token);
event DepositERC20(
address indexed _l1Token,
address indexed _l2Token,
address indexed _from,
address _to,
uint256 _amount,
bytes _data
);
uint24 private constant SAFE_BATCH_BRIDGE_GAS_LIMIT = 200000;
uint24 ETH_DEPOSIT_SAFE_GAS_LIMIT = 300000;
uint24 ERC20_DEPOSIT_SAFE_GAS_LIMIT = 200000;
uint256 private constant L2_GAS_PRICE = 10;
L1BatchBridgeGateway private batch;
L1CustomERC20Gateway private gateway;
L1GatewayRouter private router;
L2CustomERC20Gateway private counterpartGateway;
L2BatchBridgeGateway private counterpartBatch;
MockERC20 private l1Token;
MockERC20 private l2Token;
address private batchFeeVault;
function setUp() public {
__L1GatewayTestBase_setUp();
batchFeeVault = address(uint160(address(this)) - 2);
// Deploy tokens
l1Token = new MockERC20("Mock L1", "ML1", 18);
l2Token = new MockERC20("Mock L2", "ML2", 18);
// Deploy L2 contracts
counterpartGateway = new L2CustomERC20Gateway(address(1), address(1), address(1));
counterpartBatch = new L2BatchBridgeGateway(address(1), address(1));
// Deploy L1 contracts
router = L1GatewayRouter(_deployProxy(address(new L1GatewayRouter())));
gateway = L1CustomERC20Gateway(_deployProxy(address(0)));
batch = L1BatchBridgeGateway(payable(_deployProxy(address(0))));
// Initialize L1 contracts
admin.upgrade(
ITransparentUpgradeableProxy(address(gateway)),
address(new L1CustomERC20Gateway(address(counterpartGateway), address(router), address(l1Messenger)))
);
gateway.initialize(address(counterpartGateway), address(router), address(l1Messenger));
admin.upgrade(
ITransparentUpgradeableProxy(address(batch)),
address(
new L1BatchBridgeGateway(
address(counterpartBatch),
address(router),
address(l1Messenger),
address(messageQueue)
)
)
);
batch.initialize(batchFeeVault);
router.initialize(address(0), address(gateway));
messageQueue.setL2BaseFee(L2_GAS_PRICE);
// Prepare token balances
l1Token.mint(address(this), type(uint128).max);
gateway.updateTokenMapping(address(l1Token), address(l2Token));
hevm.warp(1000000);
}
function testInitialized() external {
assertBoolEq(true, batch.hasRole(bytes32(0), address(this)));
assertEq(address(counterpartBatch), batch.counterpart());
assertEq(address(router), batch.router());
assertEq(address(l1Messenger), batch.messenger());
assertEq(address(messageQueue), batch.queue());
hevm.expectRevert("Initializable: contract is already initialized");
batch.initialize(address(0));
}
function testSetTokenSetting() external {
// revert not admin
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 0, 0, 0));
hevm.stopPrank();
// revert maxTxsPerBatch = 0
hevm.expectRevert(L1BatchBridgeGateway.ErrorInvalidBatchConfig.selector);
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 0, 0, 0));
// revert maxDelayPerBatch = 0
hevm.expectRevert(L1BatchBridgeGateway.ErrorInvalidBatchConfig.selector);
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 1, 0, 0));
// revert feeAmountPerTx > minAmountPerTx
hevm.expectRevert(L1BatchBridgeGateway.ErrorInvalidBatchConfig.selector);
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(1, 0, 1, 1, 0));
// succeed
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(1, 2, 3, 4, 5));
(
uint96 feeAmountPerTx,
uint96 minAmountPerTx,
uint16 maxTxsPerBatch,
uint24 maxDelayPerBatch,
uint24 safeBridgeGasLimit
) = batch.configs(address(0));
assertEq(feeAmountPerTx, 1);
assertEq(minAmountPerTx, 2);
assertEq(maxTxsPerBatch, 3);
assertEq(maxDelayPerBatch, 4);
assertEq(safeBridgeGasLimit, 5);
}
function testSetTokenSettingFuzzing(address token, L1BatchBridgeGateway.BatchConfig memory config) external {
hevm.assume(config.maxTxsPerBatch > 0);
hevm.assume(config.maxDelayPerBatch > 0);
hevm.assume(config.feeAmountPerTx <= config.minAmountPerTx);
(
uint96 feeAmountPerTx,
uint96 minAmountPerTx,
uint16 maxTxsPerBatch,
uint24 maxDelayPerBatch,
uint24 safeBridgeGasLimit
) = batch.configs(token);
assertEq(feeAmountPerTx, 0);
assertEq(minAmountPerTx, 0);
assertEq(maxTxsPerBatch, 0);
assertEq(maxDelayPerBatch, 0);
assertEq(safeBridgeGasLimit, 0);
batch.setBatchConfig(token, config);
(feeAmountPerTx, minAmountPerTx, maxTxsPerBatch, maxDelayPerBatch, safeBridgeGasLimit) = batch.configs(token);
assertEq(feeAmountPerTx, config.feeAmountPerTx);
assertEq(minAmountPerTx, config.minAmountPerTx);
assertEq(maxTxsPerBatch, config.maxTxsPerBatch);
assertEq(maxDelayPerBatch, config.maxDelayPerBatch);
assertEq(safeBridgeGasLimit, config.safeBridgeGasLimit);
}
function checkBatchState(
address token,
uint256 phase,
L1BatchBridgeGateway.BatchState memory expected
) private {
(uint128 amount, uint64 startTime, uint64 numDeposits, bytes32 hash) = batch.batches(token, phase);
assertEq(amount, expected.amount);
assertEq(startTime, expected.startTime);
assertEq(numDeposits, expected.numDeposits);
// assertEq(hash, expected.hash);
}
function checkTokenState(address token, L1BatchBridgeGateway.TokenState memory expected) private {
(uint128 pending, uint64 currentBatchIndex, uint64 pendingBatchIndex) = batch.tokens(token);
assertEq(pending, expected.pending);
assertEq(currentBatchIndex, expected.currentBatchIndex);
assertEq(pendingBatchIndex, expected.pendingBatchIndex);
}
function testDepositETH() external {
// revert token not supported
hevm.expectRevert(L1BatchBridgeGateway.ErrorTokenNotSupported.selector);
batch.depositETH();
// revert deposit amount too small
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 100, 2, 100, ETH_DEPOSIT_SAFE_GAS_LIMIT));
hevm.expectRevert(L1BatchBridgeGateway.ErrorDepositAmountTooSmall.selector);
batch.depositETH{value: 10}();
// no fee
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 2, 100, ETH_DEPOSIT_SAFE_GAS_LIMIT));
assertEq(0, address(batch).balance);
checkBatchState(address(0), 0, L1BatchBridgeGateway.BatchState(0, 0, 0, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(0, 0, 0));
hevm.warp(1000001);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 0, 1000, 0);
batch.depositETH{value: 1000}();
assertEq(1000, address(batch).balance);
checkBatchState(address(0), 0, L1BatchBridgeGateway.BatchState(1000, 1000001, 1, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(1000, 0, 0));
hevm.warp(1000002);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 0, 2000, 0);
batch.depositETH{value: 2000}();
assertEq(3000, address(batch).balance);
checkBatchState(address(0), 0, L1BatchBridgeGateway.BatchState(3000, 1000001, 2, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(3000, 0, 0));
hevm.warp(1000003);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 1, 3000, 0);
batch.depositETH{value: 3000}();
assertEq(6000, address(batch).balance);
checkBatchState(address(0), 1, L1BatchBridgeGateway.BatchState(3000, 1000003, 1, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(6000, 1, 0));
// with fee
batch.setBatchConfig(
address(0),
L1BatchBridgeGateway.BatchConfig(100, 1000, 2, 100, ETH_DEPOSIT_SAFE_GAS_LIMIT)
);
hevm.warp(1000004);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 1, 1000, 100);
batch.depositETH{value: 1000}();
assertEq(7000, address(batch).balance);
checkBatchState(address(0), 1, L1BatchBridgeGateway.BatchState(3900, 1000003, 2, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(6900, 1, 0));
hevm.warp(1000005);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 2, 2000, 100);
batch.depositETH{value: 2000}();
assertEq(9000, address(batch).balance);
checkBatchState(address(0), 2, L1BatchBridgeGateway.BatchState(1900, 1000005, 1, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(8800, 2, 0));
hevm.warp(1000006);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 2, 3000, 100);
batch.depositETH{value: 3000}();
assertEq(12000, address(batch).balance);
checkBatchState(address(0), 2, L1BatchBridgeGateway.BatchState(4800, 1000005, 2, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(11700, 2, 0));
// switch phase by timestamp
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 100, 100, ETH_DEPOSIT_SAFE_GAS_LIMIT));
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 2, 1000, 0);
batch.depositETH{value: 1000}();
assertEq(13000, address(batch).balance);
checkBatchState(address(0), 2, L1BatchBridgeGateway.BatchState(5800, 1000005, 3, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(12700, 2, 0));
hevm.warp(1000005 + 100 + 1);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 3, 1000, 0);
batch.depositETH{value: 1000}();
assertEq(14000, address(batch).balance);
checkBatchState(address(0), 3, L1BatchBridgeGateway.BatchState(1000, 1000005 + 100 + 1, 1, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(13700, 3, 0));
}
function testDepositERC20() external {
// revert token is zero
hevm.expectRevert(L1BatchBridgeGateway.ErrorIncorrectMethodForETHDeposit.selector);
batch.depositERC20(address(0), 0);
// revert token not supported
hevm.expectRevert(L1BatchBridgeGateway.ErrorTokenNotSupported.selector);
batch.depositERC20(address(l1Token), 0);
// revert deposit amount too small
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(0, 100, 2, 100, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
l1Token.approve(address(batch), 10);
hevm.expectRevert(L1BatchBridgeGateway.ErrorDepositAmountTooSmall.selector);
batch.depositERC20(address(l1Token), 10);
// no fee
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(0, 0, 2, 100, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
assertEq(0, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 0, L1BatchBridgeGateway.BatchState(0, 0, 0, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(0, 0, 0));
hevm.warp(1000001);
l1Token.approve(address(batch), 1000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 0, 1000, 0);
batch.depositERC20(address(l1Token), 1000);
assertEq(1000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 0, L1BatchBridgeGateway.BatchState(1000, 1000001, 1, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(1000, 0, 0));
hevm.warp(1000002);
l1Token.approve(address(batch), 2000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 0, 2000, 0);
batch.depositERC20(address(l1Token), 2000);
assertEq(3000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 0, L1BatchBridgeGateway.BatchState(3000, 1000001, 2, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(3000, 0, 0));
hevm.warp(1000003);
l1Token.approve(address(batch), 3000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 1, 3000, 0);
batch.depositERC20(address(l1Token), 3000);
assertEq(6000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 1, L1BatchBridgeGateway.BatchState(3000, 1000003, 1, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(6000, 1, 0));
// with fee
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(100, 1000, 2, 100, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
hevm.warp(1000004);
l1Token.approve(address(batch), 1000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 1, 1000, 100);
batch.depositERC20(address(l1Token), 1000);
assertEq(7000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 1, L1BatchBridgeGateway.BatchState(3900, 1000003, 2, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(6900, 1, 0));
hevm.warp(1000005);
l1Token.approve(address(batch), 2000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 2, 2000, 100);
batch.depositERC20(address(l1Token), 2000);
assertEq(9000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 2, L1BatchBridgeGateway.BatchState(1900, 1000005, 1, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(8800, 2, 0));
hevm.warp(1000006);
l1Token.approve(address(batch), 3000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 2, 3000, 100);
batch.depositERC20(address(l1Token), 3000);
assertEq(12000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 2, L1BatchBridgeGateway.BatchState(4800, 1000005, 2, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(11700, 2, 0));
// switch phase by timestamp
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(0, 0, 100, 100, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
l1Token.approve(address(batch), 1000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 2, 1000, 0);
batch.depositERC20(address(l1Token), 1000);
assertEq(13000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 2, L1BatchBridgeGateway.BatchState(5800, 1000005, 3, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(12700, 2, 0));
hevm.warp(1000005 + 100 + 1);
l1Token.approve(address(batch), 1000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 3, 1000, 0);
batch.depositERC20(address(l1Token), 1000);
assertEq(14000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 3, L1BatchBridgeGateway.BatchState(1000, 1000005 + 100 + 1, 1, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(13700, 3, 0));
}
function testBatchBridgeFailure() external {
// revert not keeper
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0xfc8737ab85eb45125971625a9ebdb75cc78e01d5c1fa80c4c6e5203f47bc4fab"
);
batch.executeBatchDeposit(address(0));
hevm.stopPrank();
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert token not supported
hevm.expectRevert(L1BatchBridgeGateway.ErrorTokenNotSupported.selector);
batch.executeBatchDeposit(address(0));
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 1, 1, ETH_DEPOSIT_SAFE_GAS_LIMIT));
// revert no pending
hevm.expectRevert(L1BatchBridgeGateway.ErrorNoPendingBatch.selector);
batch.executeBatchDeposit(address(0));
// revert insufficient msg.value
batch.depositETH{value: 1000}();
hevm.expectRevert(L1BatchBridgeGateway.ErrorInsufficientMsgValueForBatchDepositFee.selector);
batch.executeBatchDeposit(address(0));
hevm.expectRevert(L1BatchBridgeGateway.ErrorInsufficientMsgValueForBatchDepositFee.selector);
batch.executeBatchDeposit{value: L2_GAS_PRICE * ETH_DEPOSIT_SAFE_GAS_LIMIT}(address(0));
hevm.expectRevert(L1BatchBridgeGateway.ErrorInsufficientMsgValueForBatchDepositFee.selector);
batch.executeBatchDeposit{value: L2_GAS_PRICE * (SAFE_BATCH_BRIDGE_GAS_LIMIT + ETH_DEPOSIT_SAFE_GAS_LIMIT) - 1}(
address(0)
);
// succeed
batch.executeBatchDeposit{value: L2_GAS_PRICE * (SAFE_BATCH_BRIDGE_GAS_LIMIT + ETH_DEPOSIT_SAFE_GAS_LIMIT)}(
address(0)
);
}
function testBatchBridgeETH() external {
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// no deposit fee
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 1, 1, ETH_DEPOSIT_SAFE_GAS_LIMIT));
batch.depositETH{value: 1000}();
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(1000, 0, 0));
// emit SentMessage by deposit ETH
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(batch), address(counterpartBatch), 1000, 0, ETH_DEPOSIT_SAFE_GAS_LIMIT, "");
// emit SentMessage by batchBridge
hevm.expectEmit(true, true, false, true);
emit SentMessage(
address(batch),
address(counterpartBatch),
0,
1,
SAFE_BATCH_BRIDGE_GAS_LIMIT,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(
address(0),
address(0),
0,
BatchBridgeCodec.hash(
BatchBridgeCodec.encodeInitialNode(address(0), 0),
BatchBridgeCodec.encodeNode(address(this), 1000)
)
)
)
);
// emit BatchDeposit
hevm.expectEmit(true, true, true, true);
emit BatchDeposit(address(this), address(0), 0, address(0));
uint256 batchFeeVaultBefore = batchFeeVault.balance;
uint256 messengerBefore = address(l1Messenger).balance;
batch.executeBatchDeposit{value: 1 ether}(address(0));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(0, 1, 1));
assertEq(batchFeeVaultBefore, batchFeeVault.balance);
assertEq(messengerBefore + 1000, address(l1Messenger).balance);
// has deposit fee = 100
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(100, 1000, 1, 1, ETH_DEPOSIT_SAFE_GAS_LIMIT));
batch.depositETH{value: 1000}();
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(900, 1, 1));
// emit SentMessage by deposit ETH
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(batch), address(counterpartBatch), 900, 2, ETH_DEPOSIT_SAFE_GAS_LIMIT, "");
// emit SentMessage by batchBridge
hevm.expectEmit(true, true, false, true);
emit SentMessage(
address(batch),
address(counterpartBatch),
0,
3,
SAFE_BATCH_BRIDGE_GAS_LIMIT,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(
address(0),
address(0),
1,
BatchBridgeCodec.hash(
BatchBridgeCodec.encodeInitialNode(address(0), 1),
BatchBridgeCodec.encodeNode(address(this), 900)
)
)
)
);
// emit BatchDeposit
hevm.expectEmit(true, true, true, true);
emit BatchDeposit(address(this), address(0), 1, address(0));
batchFeeVaultBefore = batchFeeVault.balance;
messengerBefore = address(l1Messenger).balance;
batch.executeBatchDeposit{value: 1 ether}(address(0));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(0, 2, 2));
assertEq(batchFeeVaultBefore + 100, batchFeeVault.balance);
assertEq(messengerBefore + 900, address(l1Messenger).balance);
}
function testBatchBridgeERC20() external {
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// no deposit fee
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(0, 0, 1, 1, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
l1Token.approve(address(batch), 1000);
batch.depositERC20(address(l1Token), 1000);
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(1000, 0, 0));
bytes memory message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
address(l1Token),
address(l2Token),
address(batch),
address(counterpartBatch),
1000,
new bytes(0)
);
// emit SentMessage by deposit ERC20
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, ERC20_DEPOSIT_SAFE_GAS_LIMIT, message);
// emit SentMessage by batchBridge
hevm.expectEmit(true, true, false, true);
emit SentMessage(
address(batch),
address(counterpartBatch),
0,
1,
SAFE_BATCH_BRIDGE_GAS_LIMIT,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(
address(l1Token),
address(l2Token),
0,
BatchBridgeCodec.hash(
BatchBridgeCodec.encodeInitialNode(address(l1Token), 0),
BatchBridgeCodec.encodeNode(address(this), 1000)
)
)
)
);
// emit BatchDeposit
hevm.expectEmit(true, true, true, true);
emit BatchDeposit(address(this), address(l1Token), 0, address(l2Token));
uint256 batchFeeVaultBefore = l1Token.balanceOf(batchFeeVault);
uint256 gatewayBefore = l1Token.balanceOf(address(gateway));
batch.executeBatchDeposit{value: 1 ether}(address(l1Token));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(0, 1, 1));
assertEq(batchFeeVaultBefore, l1Token.balanceOf(batchFeeVault));
assertEq(gatewayBefore + 1000, l1Token.balanceOf(address(gateway)));
// has deposit fee = 100
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(100, 1000, 1, 1, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
l1Token.approve(address(batch), 1000);
batch.depositERC20(address(l1Token), 1000);
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(900, 1, 1));
message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
address(l1Token),
address(l2Token),
address(batch),
address(counterpartBatch),
900,
new bytes(0)
);
// emit SentMessage by deposit ERC20
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 2, ERC20_DEPOSIT_SAFE_GAS_LIMIT, message);
// emit SentMessage by batchBridge
hevm.expectEmit(true, true, false, true);
emit SentMessage(
address(batch),
address(counterpartBatch),
0,
3,
SAFE_BATCH_BRIDGE_GAS_LIMIT,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(
address(l1Token),
address(l2Token),
1,
BatchBridgeCodec.hash(
BatchBridgeCodec.encodeInitialNode(address(l1Token), 1),
BatchBridgeCodec.encodeNode(address(this), 900)
)
)
)
);
// emit BatchDeposit
hevm.expectEmit(true, true, true, true);
emit BatchDeposit(address(this), address(l1Token), 1, address(l2Token));
batchFeeVaultBefore = l1Token.balanceOf(batchFeeVault);
gatewayBefore = l1Token.balanceOf(address(gateway));
batch.executeBatchDeposit{value: 1 ether}(address(l1Token));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(0, 2, 2));
assertEq(batchFeeVaultBefore + 100, l1Token.balanceOf(batchFeeVault));
assertEq(gatewayBefore + 900, l1Token.balanceOf(address(gateway)));
}
}

View File

@@ -0,0 +1,454 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
import {ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
import {Strings} from "@openzeppelin/contracts/utils/Strings.sol";
import {L1BatchBridgeGateway} from "../../batch-bridge/L1BatchBridgeGateway.sol";
import {L2BatchBridgeGateway} from "../../batch-bridge/L2BatchBridgeGateway.sol";
import {BatchBridgeCodec} from "../../batch-bridge/BatchBridgeCodec.sol";
import {RevertOnTransferToken} from "../mocks/tokens/RevertOnTransferToken.sol";
import {MockScrollMessenger} from "../mocks/MockScrollMessenger.sol";
import {ScrollTestBase} from "../ScrollTestBase.t.sol";
contract L2BatchBridgeGatewayTest is ScrollTestBase {
event UpdateTokenMapping(address indexed l2Token, address indexed oldL1Token, address indexed newL1Token);
event FinalizeBatchDeposit(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
event BatchDistribute(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
event DistributeFailed(address indexed l2Token, uint256 indexed batchIndex, address receiver, uint256 amount);
L1BatchBridgeGateway private counterpartBatch;
L2BatchBridgeGateway private batch;
MockScrollMessenger messenger;
MockERC20 private l1Token;
MockERC20 private l2Token;
RevertOnTransferToken private maliciousL2Token;
bool revertOnReceive;
bool loopOnReceive;
// two safe EOAs to receive ETH
address private recipient1;
address private recipient2;
receive() external payable {
if (revertOnReceive) revert();
if (loopOnReceive) {
for (uint256 i = 0; i < 1000000000; i++) {
recipient1 = address(uint160(address(this)) - 1);
}
}
}
function setUp() public {
__ScrollTestBase_setUp();
recipient1 = address(uint160(address(this)) - 1);
recipient2 = address(uint160(address(this)) - 2);
// Deploy tokens
l1Token = new MockERC20("Mock L1", "ML1", 18);
l2Token = new MockERC20("Mock L2", "ML2", 18);
maliciousL2Token = new RevertOnTransferToken("X", "Y", 18);
messenger = new MockScrollMessenger();
counterpartBatch = new L1BatchBridgeGateway(address(1), address(1), address(1), address(1));
batch = L2BatchBridgeGateway(payable(_deployProxy(address(0))));
// Initialize L2 contracts
admin.upgrade(
ITransparentUpgradeableProxy(address(batch)),
address(new L2BatchBridgeGateway(address(counterpartBatch), address(messenger)))
);
batch.initialize();
}
function testInitialized() external {
assertBoolEq(true, batch.hasRole(bytes32(0), address(this)));
assertEq(address(counterpartBatch), batch.counterpart());
assertEq(address(messenger), batch.messenger());
hevm.expectRevert("Initializable: contract is already initialized");
batch.initialize();
}
function testFinalizeBatchDeposit() external {
// revert caller not messenger
hevm.expectRevert(L2BatchBridgeGateway.ErrorCallerNotMessenger.selector);
batch.finalizeBatchDeposit(address(0), address(0), 0, bytes32(0));
// revert xDomainMessageSender not counterpart
hevm.expectRevert(L2BatchBridgeGateway.ErrorMessageSenderNotCounterpart.selector);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(l2Token), 0, bytes32(0))
)
);
messenger.setXDomainMessageSender(address(counterpartBatch));
// emit FinalizeBatchDeposit
assertEq(address(0), batch.tokenMapping(address(l2Token)));
hevm.expectEmit(true, true, true, true);
emit FinalizeBatchDeposit(address(l1Token), address(l2Token), 1);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(l2Token), 1, bytes32(uint256(1)))
)
);
assertEq(address(l1Token), batch.tokenMapping(address(l2Token)));
assertEq(batch.batchHashes(address(l2Token), 1), bytes32(uint256(1)));
// revert token not match
hevm.expectRevert(L2BatchBridgeGateway.ErrorL1TokenMismatched.selector);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(l2Token), 0, bytes32(0)))
);
}
function testFinalizeBatchDepositFuzzing(
address token1,
address token2,
uint256 batchIndex,
bytes32 hash
) external {
messenger.setXDomainMessageSender(address(counterpartBatch));
assertEq(address(0), batch.tokenMapping(token2));
hevm.expectEmit(true, true, true, true);
emit FinalizeBatchDeposit(token1, token2, batchIndex);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (token1, token2, batchIndex, hash))
);
assertEq(token1, batch.tokenMapping(token2));
assertEq(batch.batchHashes(token2, batchIndex), hash);
}
function testDistributeETH() external {
// revert not keeper
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0xfc8737ab85eb45125971625a9ebdb75cc78e01d5c1fa80c4c6e5203f47bc4fab"
);
batch.distribute(address(0), 0, new bytes32[](0));
hevm.stopPrank();
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert ErrorBatchHashMismatch
hevm.expectRevert(L2BatchBridgeGateway.ErrorBatchHashMismatch.selector);
batch.distribute(address(0), 1, new bytes32[](0));
// send some ETH to `L2BatchBridgeGateway`.
messenger.setXDomainMessageSender(address(counterpartBatch));
messenger.callTarget{value: 1 ether}(address(batch), "");
address[] memory receivers = new address[](2);
uint256[] memory amounts = new uint256[](2);
receivers[0] = recipient1;
receivers[1] = recipient2;
amounts[0] = 100;
amounts[1] = 200;
// all success
(bytes32[] memory nodes, bytes32 batchHash) = _encodeNodes(address(0), 0, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(0), 0, batchHash))
);
assertEq(0, recipient1.balance);
assertEq(0, recipient2.balance);
uint256 batchBalanceBefore = address(batch).balance;
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(0), address(0), 0);
batch.distribute(address(0), 0, nodes);
assertEq(100, recipient1.balance);
assertEq(200, recipient2.balance);
assertEq(batchBalanceBefore - 300, address(batch).balance);
assertBoolEq(true, batch.isDistributed(batchHash));
// revert ErrorBatchDistributed
hevm.expectRevert(L2BatchBridgeGateway.ErrorBatchDistributed.selector);
batch.distribute(address(0), 0, nodes);
// all failed due to revert
revertOnReceive = true;
loopOnReceive = false;
receivers[0] = address(this);
receivers[1] = address(this);
(nodes, batchHash) = _encodeNodes(address(0), 1, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(0), 1, batchHash))
);
uint256 thisBalanceBefore = address(this).balance;
batchBalanceBefore = address(batch).balance;
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(0), 1, address(this), 100);
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(0), 1, address(this), 200);
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(0), address(0), 1);
batch.distribute(address(0), 1, nodes);
assertEq(batchBalanceBefore, address(batch).balance);
assertEq(thisBalanceBefore, address(this).balance);
assertBoolEq(true, batch.isDistributed(batchHash));
assertEq(300, batch.failedAmount(address(0)));
// all failed due to out of gas
revertOnReceive = false;
loopOnReceive = true;
(nodes, batchHash) = _encodeNodes(address(0), 2, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(0), 2, batchHash))
);
thisBalanceBefore = address(this).balance;
batchBalanceBefore = address(batch).balance;
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(0), 2, address(this), 100);
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(0), 2, address(this), 200);
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(0), address(0), 2);
batch.distribute(address(0), 2, nodes);
assertEq(batchBalanceBefore, address(batch).balance);
assertEq(thisBalanceBefore, address(this).balance);
assertBoolEq(true, batch.isDistributed(batchHash));
assertEq(600, batch.failedAmount(address(0)));
}
function testDistributeERC20() external {
// revert not keeper
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0xfc8737ab85eb45125971625a9ebdb75cc78e01d5c1fa80c4c6e5203f47bc4fab"
);
batch.distribute(address(l2Token), 0, new bytes32[](0));
hevm.stopPrank();
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert ErrorBatchHashMismatch
hevm.expectRevert(L2BatchBridgeGateway.ErrorBatchHashMismatch.selector);
batch.distribute(address(l2Token), 1, new bytes32[](0));
// mint some ERC20 to `L2BatchBridgeGateway`.
messenger.setXDomainMessageSender(address(counterpartBatch));
l2Token.mint(address(batch), 1 ether);
address[] memory receivers = new address[](2);
uint256[] memory amounts = new uint256[](2);
receivers[0] = recipient1;
receivers[1] = recipient2;
amounts[0] = 100;
amounts[1] = 200;
// all success
(bytes32[] memory nodes, bytes32 batchHash) = _encodeNodes(address(l1Token), 0, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(l2Token), 0, batchHash)
)
);
assertEq(0, recipient1.balance);
assertEq(0, recipient2.balance);
uint256 batchBalanceBefore = l2Token.balanceOf(address(batch));
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(l1Token), address(l2Token), 0);
batch.distribute(address(l2Token), 0, nodes);
assertEq(100, l2Token.balanceOf(recipient1));
assertEq(200, l2Token.balanceOf(recipient2));
assertEq(batchBalanceBefore - 300, l2Token.balanceOf(address(batch)));
assertBoolEq(true, batch.isDistributed(batchHash));
// revert ErrorBatchDistributed
hevm.expectRevert(L2BatchBridgeGateway.ErrorBatchDistributed.selector);
batch.distribute(address(l2Token), 0, nodes);
maliciousL2Token.mint(address(batch), 1 ether);
// all failed due to revert
maliciousL2Token.setRevertOnTransfer(true);
receivers[0] = address(this);
receivers[1] = address(this);
(nodes, batchHash) = _encodeNodes(address(l1Token), 1, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(maliciousL2Token), 1, batchHash)
)
);
uint256 thisBalanceBefore = maliciousL2Token.balanceOf(address(this));
batchBalanceBefore = maliciousL2Token.balanceOf(address(batch));
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(maliciousL2Token), 1, address(this), 100);
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(maliciousL2Token), 1, address(this), 200);
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(l1Token), address(maliciousL2Token), 1);
batch.distribute(address(maliciousL2Token), 1, nodes);
assertEq(batchBalanceBefore, maliciousL2Token.balanceOf(address(batch)));
assertEq(thisBalanceBefore, maliciousL2Token.balanceOf(address(this)));
assertBoolEq(true, batch.isDistributed(batchHash));
assertEq(300, batch.failedAmount(address(maliciousL2Token)));
// all failed due to transfer return false
maliciousL2Token.setRevertOnTransfer(false);
maliciousL2Token.setTransferReturn(false);
(nodes, batchHash) = _encodeNodes(address(l1Token), 2, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(maliciousL2Token), 2, batchHash)
)
);
thisBalanceBefore = maliciousL2Token.balanceOf(address(this));
batchBalanceBefore = maliciousL2Token.balanceOf(address(batch));
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(maliciousL2Token), 2, address(this), 100);
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(maliciousL2Token), 2, address(this), 200);
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(l1Token), address(maliciousL2Token), 2);
batch.distribute(address(maliciousL2Token), 2, nodes);
assertEq(batchBalanceBefore, maliciousL2Token.balanceOf(address(batch)));
assertEq(thisBalanceBefore, maliciousL2Token.balanceOf(address(this)));
assertBoolEq(true, batch.isDistributed(batchHash));
assertEq(600, batch.failedAmount(address(maliciousL2Token)));
}
function testWithdrawFailedAmountETH() external {
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert not admin
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
batch.withdrawFailedAmount(address(0), address(this));
hevm.stopPrank();
// revert no failed
hevm.expectRevert(L2BatchBridgeGateway.ErrorNoFailedDistribution.selector);
batch.withdrawFailedAmount(address(0), address(this));
// send some ETH to `L2BatchBridgeGateway`.
messenger.setXDomainMessageSender(address(counterpartBatch));
messenger.callTarget{value: 1 ether}(address(batch), "");
// make a failed distribution
address[] memory receivers = new address[](2);
uint256[] memory amounts = new uint256[](2);
receivers[0] = address(this);
receivers[1] = address(this);
amounts[0] = 100;
amounts[1] = 200;
revertOnReceive = true;
(bytes32[] memory nodes, bytes32 batchHash) = _encodeNodes(address(0), 1, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(0), 1, batchHash))
);
assertEq(0, batch.failedAmount(address(0)));
batch.distribute(address(0), 1, nodes);
assertEq(300, batch.failedAmount(address(0)));
// withdraw failed
uint256 thisBalance = recipient1.balance;
uint256 batchBalance = address(batch).balance;
batch.withdrawFailedAmount(address(0), recipient1);
assertEq(0, batch.failedAmount(address(0)));
assertEq(thisBalance + 300, recipient1.balance);
assertEq(batchBalance - 300, address(batch).balance);
// revert no failed
hevm.expectRevert(L2BatchBridgeGateway.ErrorNoFailedDistribution.selector);
batch.withdrawFailedAmount(address(0), recipient1);
}
function testWithdrawFailedAmountERC20() external {
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert not admin
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
batch.withdrawFailedAmount(address(0), address(this));
hevm.stopPrank();
// revert no failed
hevm.expectRevert(L2BatchBridgeGateway.ErrorNoFailedDistribution.selector);
batch.withdrawFailedAmount(address(0), address(this));
// mint some ERC20 to `L2BatchBridgeGateway`.
messenger.setXDomainMessageSender(address(counterpartBatch));
maliciousL2Token.mint(address(batch), 1 ether);
// make a failed distribution
address[] memory receivers = new address[](2);
uint256[] memory amounts = new uint256[](2);
receivers[0] = address(this);
receivers[1] = address(this);
amounts[0] = 100;
amounts[1] = 200;
maliciousL2Token.setRevertOnTransfer(true);
(bytes32[] memory nodes, bytes32 batchHash) = _encodeNodes(address(l1Token), 1, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(maliciousL2Token), 1, batchHash)
)
);
assertEq(0, batch.failedAmount(address(maliciousL2Token)));
batch.distribute(address(maliciousL2Token), 1, nodes);
assertEq(300, batch.failedAmount(address(maliciousL2Token)));
// withdraw failed
maliciousL2Token.setRevertOnTransfer(false);
maliciousL2Token.setTransferReturn(true);
uint256 thisBalance = maliciousL2Token.balanceOf(recipient1);
uint256 batchBalance = maliciousL2Token.balanceOf(address(batch));
batch.withdrawFailedAmount(address(maliciousL2Token), recipient1);
assertEq(0, batch.failedAmount(address(maliciousL2Token)));
assertEq(thisBalance + 300, maliciousL2Token.balanceOf(recipient1));
assertEq(batchBalance - 300, maliciousL2Token.balanceOf(address(batch)));
// revert no failed
hevm.expectRevert(L2BatchBridgeGateway.ErrorNoFailedDistribution.selector);
batch.withdrawFailedAmount(address(maliciousL2Token), recipient1);
}
function _encodeNodes(
address token,
uint256 batchIndex,
address[] memory receivers,
uint256[] memory amounts
) private returns (bytes32[] memory nodes, bytes32 hash) {
nodes = new bytes32[](receivers.length);
hash = BatchBridgeCodec.encodeInitialNode(token, uint64(batchIndex));
for (uint256 i = 0; i < receivers.length; i++) {
nodes[i] = BatchBridgeCodec.encodeNode(receivers[i], uint96(amounts[i]));
hash = BatchBridgeCodec.hash(hash, nodes[i]);
}
}
}

View File

@@ -0,0 +1,45 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
// solhint-disable no-empty-blocks
contract RevertOnTransferToken is MockERC20 {
bool private revertOnTransfer;
bool private transferReturn;
constructor(
string memory _name,
string memory _symbol,
uint8 _decimals
) MockERC20(_name, _symbol, _decimals) {
transferReturn = true;
}
function setRevertOnTransfer(bool _revertOnTransfer) external payable {
revertOnTransfer = _revertOnTransfer;
}
function setTransferReturn(bool _transferReturn) external payable {
transferReturn = _transferReturn;
}
function transfer(address to, uint256 amount) public virtual override returns (bool) {
if (revertOnTransfer) revert();
if (!transferReturn) return false;
balanceOf[msg.sender] -= amount;
// Cannot overflow because the sum of all user
// balances can't exceed the max uint256 value.
unchecked {
balanceOf[to] += amount;
}
emit Transfer(msg.sender, to, amount);
return true;
}
}

View File

@@ -95,7 +95,7 @@ func (c *Collector) Stop() {
c.stopCleanChallengeChan <- struct{}{}
}
// timeoutTask cron check the send task is timeout. if timeout reached, restore the
// timeoutBatchProofTask cron check the send task is timeout. if timeout reached, restore the
// chunk/batch task to unassigned. then the batch/chunk collector can retry it.
func (c *Collector) timeoutBatchProofTask() {
defer func() {

View File

@@ -19,13 +19,12 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/database/migrate"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"

View File

@@ -667,6 +667,8 @@ github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMP
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ=
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -1490,7 +1492,11 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20230306131930-03b4de32b78b/go.mod
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230802095950-4b2bbf6225e7/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@@ -1559,9 +1565,6 @@ github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZy
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=

2
l2geth

Submodule l2geth updated: 38a3a9c919...246955a4df

View File

@@ -9,12 +9,12 @@ import (
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/prover/config"
"scroll-tech/common/cmd"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/prover/config"
)
var (

View File

@@ -31,9 +31,9 @@ clean: ## Empty out the bin folder
@rm -rf build/bin
docker_push:
docker docker push scrolltech/gas-oracle:${IMAGE_VERSION}
docker docker push scrolltech/event-watcher:${IMAGE_VERSION}
docker docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
docker push scrolltech/gas-oracle:${IMAGE_VERSION}
docker push scrolltech/event-watcher:${IMAGE_VERSION}
docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile

View File

@@ -16,7 +16,7 @@ go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
2. `solc`
See https://docs.soliditylang.org/en/latest/installing-solidity.html
Ensure you install the version of solc required by [MockBridge.sol](./mock_bridge/MockBridge.sol#L2) (e.g., 0.8.24). See https://docs.soliditylang.org/en/latest/installing-solidity.html
## Build
@@ -31,7 +31,7 @@ make rollup_bins
(Note: make sure you use different private keys for different senders in config.json.)
```bash
./build/bin/event_watcher --config ./config.json
./build/bin/gas_oracle --config ./config.json
./build/bin/rollup_relayer --config ./config.json
./build/bin/event_watcher --config ./conf/config.json
./build/bin/gas_oracle --config ./conf/config.json
./build/bin/rollup_relayer --config ./conf/config.json
```

View File

@@ -7,11 +7,11 @@ import (
"testing"
"time"
"scroll-tech/rollup/internal/config"
"scroll-tech/common/cmd"
"scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
"scroll-tech/rollup/internal/config"
)
// MockApp mockApp-test client manager.
@@ -85,7 +85,7 @@ func (b *MockApp) MockConfig(store bool) error {
return err
}
l1GethEndpoint, err := b.testApps.GetL1GethEndPoint()
l1GethEndpoint, err := b.testApps.GetPoSL1EndPoint()
if err != nil {
return err
}

View File

@@ -19,7 +19,9 @@
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
"gas_price_diff": 50000,
"l1_base_fee_weight": 0.132,
"l1_blob_base_fee_weight": 0.145
},
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
}
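
These two weights feed the revised L1 gas price calculation changed further down in this compare: the reported value becomes roughly l1_base_fee_weight × base fee + l1_blob_base_fee_weight × blob base fee. As an illustration only, with the sample weights above, a base fee of 40 gwei and a blob base fee of 10 gwei would give about 0.132 × 40 + 0.145 × 10 ≈ 6.73 gwei.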

View File

@@ -5,12 +5,12 @@ go 1.21
require (
github.com/agiledragon/gomonkey/v2 v2.11.0
github.com/consensys/gnark-crypto v0.12.1
github.com/crate-crypto/go-kzg-4844 v0.7.0
github.com/crate-crypto/go-kzg-4844 v1.0.0
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/holiman/uint256 v1.2.4
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
@@ -87,7 +87,7 @@ require (
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.7.1 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect

View File

@@ -43,8 +43,8 @@ github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJ
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -237,10 +237,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=

View File

@@ -70,8 +70,14 @@ type RelayerConfig struct {
type GasOracleConfig struct {
// MinGasPrice store the minimum gas price to set.
MinGasPrice uint64 `json:"min_gas_price"`
// GasPriceDiff store the percentage of gas price difference.
// GasPriceDiff is the minimum percentage of gas price difference to update gas oracle.
GasPriceDiff uint64 `json:"gas_price_diff"`
// The following configs are only for updating L1 gas price, used for sender in L2.
// The weight for L1 base fee.
L1BaseFeeWeight float64 `json:"l1_base_fee_weight"`
// The weight for L1 blob base fee.
L1BlobBaseFeeWeight float64 `json:"l1_blob_base_fee_weight"`
}
// relayerConfigAlias RelayerConfig alias name

View File

@@ -3,6 +3,7 @@ package relayer
import (
"context"
"fmt"
"math"
"math/big"
"github.com/prometheus/client_golang/prometheus"
@@ -35,9 +36,11 @@ type Layer1Relayer struct {
gasOracleSender *sender.Sender
l1GasOracleABI *abi.ABI
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
l1BaseFeeWeight float64
l1BlobBaseFeeWeight float64
l1BlockOrm *orm.L1Block
l2BlockOrm *orm.L2Block
@@ -86,8 +89,10 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
gasOracleSender: gasOracleSender,
l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
l1BaseFeeWeight: cfg.GasOracleConfig.L1BaseFeeWeight,
l1BlobBaseFeeWeight: cfg.GasOracleConfig.L1BlobBaseFeeWeight,
}
l1Relayer.metrics = initL1RelayerMetrics(reg)
@@ -140,7 +145,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
var baseFee uint64
if isBernoulli && block.BlobBaseFee != 0 {
baseFee = block.BlobBaseFee
baseFee = uint64(math.Ceil(r.l1BaseFeeWeight*float64(block.BaseFee) + r.l1BlobBaseFeeWeight*float64(block.BlobBaseFee)))
} else {
baseFee = block.BaseFee
}
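
A minimal, self-contained sketch of the weighted calculation introduced above; the fee values are illustrative only, and this is not the Layer1Relayer code itself:

```go
package main

import (
	"fmt"
	"math"
)

// weightedBaseFee mirrors the post-Bernoulli branch above: the oracle reports
// a weighted sum of the L1 base fee and the L1 blob base fee, rounded up.
func weightedBaseFee(baseFee, blobBaseFee uint64, baseFeeWeight, blobBaseFeeWeight float64) uint64 {
	return uint64(math.Ceil(baseFeeWeight*float64(baseFee) + blobBaseFeeWeight*float64(blobBaseFee)))
}

func main() {
	// Example inputs: 40 gwei base fee, 10 gwei blob base fee, weights 0.132 / 0.145.
	fmt.Println(weightedBaseFee(40_000_000_000, 10_000_000_000, 0.132, 0.145)) // roughly 6.73 gwei, printed in wei
}
```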

View File

@@ -14,7 +14,6 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
@@ -26,9 +25,7 @@ var (
// config
cfg *config.Config
testApps *testcontainers.TestcontainerApps
posL1TestEnv *dockercompose.PoSL1TestEnv
testApps *testcontainers.TestcontainerApps
// l2geth client
l2Cli *ethclient.Client
@@ -53,15 +50,13 @@ func setupEnv(t *testing.T) {
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
assert.NoError(t, err, "failed to create PoS L1 test environment")
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL2GethContainer())
assert.NoError(t, testApps.StartPoSL1Container())
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
@@ -110,9 +105,6 @@ func TestMain(m *testing.M) {
if testApps != nil {
testApps.Free()
}
if posL1TestEnv != nil {
posL1TestEnv.Stop()
}
}()
m.Run()
}
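
Several suites in this compare replace the docker-compose PoSL1TestEnv with the testcontainers-based PoS L1 container. A minimal sketch of the resulting setup pattern, using only helper calls that appear in these diffs; the package name is invented and error handling is simplified for illustration:

```go
package sender_test

import (
	"testing"

	"scroll-tech/common/testcontainers"
)

func TestMain(m *testing.M) {
	// All dependencies now come up as containers; StartPoSL1Container replaces
	// the former dockercompose.NewPoSL1TestEnv()/Start() pair.
	testApps := testcontainers.NewTestcontainerApps()
	defer testApps.Free()

	if err := testApps.StartPostgresContainer(); err != nil {
		panic(err)
	}
	if err := testApps.StartL2GethContainer(); err != nil {
		panic(err)
	}
	if err := testApps.StartPoSL1Container(); err != nil {
		panic(err)
	}

	// The L1 endpoint and client are now fetched from the container.
	if _, err := testApps.GetPoSL1EndPoint(); err != nil {
		panic(err)
	}

	m.Run()
}
```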

View File

@@ -618,13 +618,13 @@ func makeSidecar(blob *kzg4844.Blob) (*gethTypes.BlobTxSidecar, error) {
var commitments []kzg4844.Commitment
var proofs []kzg4844.Proof
for _, b := range blobs {
c, err := kzg4844.BlobToCommitment(b)
for i := range blobs {
c, err := kzg4844.BlobToCommitment(&blobs[i])
if err != nil {
return nil, fmt.Errorf("failed to get blob commitment, err: %w", err)
}
p, err := kzg4844.ComputeBlobProof(b, c)
p, err := kzg4844.ComputeBlobProof(&blobs[i], c)
if err != nil {
return nil, fmt.Errorf("failed to compute blob proof, err: %w", err)
}
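
With go-kzg-4844 v1.0.0 and the newer go-ethereum fork, BlobToCommitment and ComputeBlobProof take a *kzg4844.Blob, which is why the loop above indexes the slice: &blobs[i] points at the element itself, whereas ranging by value would take the address of a per-iteration copy of the 128 KiB blob. A hedged sketch of the resulting pattern; import paths follow the fork used in this compare and the sidecar field names follow go-ethereum's core/types, but treat the details as assumptions:

```go
package sidecar

import (
	"fmt"

	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// buildSidecar is illustration only: commitments and proofs computed with the
// pointer-taking kzg4844 helpers, then packed into a blob transaction sidecar.
func buildSidecar(blobs []kzg4844.Blob) (*gethTypes.BlobTxSidecar, error) {
	commitments := make([]kzg4844.Commitment, 0, len(blobs))
	proofs := make([]kzg4844.Proof, 0, len(blobs))
	for i := range blobs {
		c, err := kzg4844.BlobToCommitment(&blobs[i]) // pointer to the slice element, not a copy
		if err != nil {
			return nil, fmt.Errorf("blob commitment: %w", err)
		}
		p, err := kzg4844.ComputeBlobProof(&blobs[i], c)
		if err != nil {
			return nil, fmt.Errorf("blob proof: %w", err)
		}
		commitments = append(commitments, c)
		proofs = append(proofs, p)
	}
	return &gethTypes.BlobTxSidecar{Blobs: blobs, Commitments: commitments, Proofs: proofs}, nil
}
```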

View File

@@ -26,7 +26,6 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
@@ -41,7 +40,6 @@ var (
privateKey *ecdsa.PrivateKey
cfg *config.Config
testApps *testcontainers.TestcontainerApps
posL1TestEnv *dockercompose.PoSL1TestEnv
txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
txUint8Types = []uint8{0, 2, 3}
@@ -54,9 +52,6 @@ func TestMain(m *testing.M) {
if testApps != nil {
testApps.Free()
}
if posL1TestEnv != nil {
posL1TestEnv.Stop()
}
}()
m.Run()
}
@@ -73,16 +68,13 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
privateKey = priv
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
assert.NoError(t, err, "failed to create PoS L1 test environment")
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL1GethContainer())
assert.NoError(t, testApps.StartL2GethContainer())
assert.NoError(t, testApps.StartPoSL1Container())
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
db, err = testApps.GetGormDBClient()
assert.NoError(t, err)
@@ -90,7 +82,7 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
l1Client, err := posL1TestEnv.L1Client()
l1Client, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
chainID, err := l1Client.ChainID(context.Background())
@@ -759,7 +751,7 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
pointBigInt := new(big.Int).SetBytes(pointHash.Bytes())
point := kzg4844.Point(new(big.Int).Mod(pointBigInt, blsModulo).Bytes())
commitment := sideCar.Commitments[0]
proof, claim, err := kzg4844.ComputeProof(*blob, point)
proof, claim, err := kzg4844.ComputeProof(blob, point)
assert.NoError(t, err)
var claimArray [32]byte

View File

@@ -133,15 +133,13 @@ func (p *BatchProposer) TryProposeBatch() {
func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion encoding.CodecVersion) error {
err := p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecVersion, dbTX)
dbBatch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecVersion, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure",
"start chunk index", batch.StartChunkIndex, "end chunk index", batch.EndChunkIndex, "error", dbErr)
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "index", batch.Index, "parent hash", batch.ParentBatchHash.Hex(), "error", dbErr)
return dbErr
}
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, batch.StartChunkIndex, batch.EndChunkIndex, batch.Hash, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
if dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, dbBatch.Hash, dbTX); dbErr != nil {
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", dbBatch.Hash, "error", dbErr)
return dbErr
}
return nil
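
The rename from batch to dbBatch above removes a shadowing pattern: the record returned by InsertBatch can be nil on error, yet the old error branch read fields from it while logging. A tiny self-contained illustration of that failure mode; all names here are invented for the example:

```go
package main

import "fmt"

type row struct{ Index uint64 }

// insertRow stands in for an ORM insert that returns a nil record on failure.
func insertRow(fail bool) (*row, error) {
	if fail {
		return nil, fmt.Errorf("insert failed")
	}
	return &row{Index: 1}, nil
}

func main() {
	rec, err := insertRow(true)
	if err != nil {
		// Reading rec.Index here would dereference a nil pointer and panic.
		// The fix above logs fields of the input batch instead and names the
		// returned record dbBatch so it can no longer shadow the input.
		fmt.Println("insert failed:", err, "returned record is nil:", rec == nil)
	}
}
```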

View File

@@ -192,27 +192,69 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
batchTimeoutSec uint64
forkBlock *big.Int
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
{
name: "NoLimitReached",
maxChunkNum: 10,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
name: "NoLimitReached",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "Timeout",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 0,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxL1CommitGasPerBatchIs0",
maxChunkNum: 10,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerBatchIs0",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxChunkNumPerBatchIs1",
maxChunkNum: 1,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitGasPerBatchIsFirstChunk",
maxChunkNum: 10,
maxL1CommitGas: 190352,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
@@ -220,6 +262,8 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
{
name: "ForkBlockReached",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
@@ -243,7 +287,7 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
@@ -252,7 +296,7 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
@@ -262,32 +306,34 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
BernoulliBlock: big.NewInt(0),
HomesteadBlock: tt.forkBlock,
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: tt.maxChunkNum,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
BernoulliBlock: big.NewInt(0),
HomesteadBlock: tt.forkBlock,
}, db, nil)
bp.TryProposeBatch()
@@ -313,7 +359,7 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
}
}
func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
@@ -393,6 +439,86 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
assert.Equal(t, uint64(6035), batches[0].TotalL1CommitCalldataSize)
}
func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
batches = batches[1:]
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Len(t, dbChunks, 2)
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
assert.Equal(t, uint64(161270), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}
func testBatchProposerBlobSizeLimit(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
@@ -423,8 +549,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
@@ -443,8 +569,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: math.MaxUint64,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)

View File

@@ -205,6 +205,8 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
name string
maxBlockNum uint64
maxTxNum uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
forkBlock *big.Int
@@ -212,42 +214,72 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
{
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "Timeout",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 0,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 2,
},
{
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "MaxL1CommitGasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxBlockNumPerChunkIs1",
maxBlockNum: 1,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
@@ -257,6 +289,30 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
name: "MaxTxNumPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 2,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitGasPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 2522,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
@@ -266,6 +322,8 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
name: "MaxRowConsumptionPerChunkIs1",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
@@ -275,6 +333,8 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
name: "ForkBlockReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
@@ -295,12 +355,14 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxTxNumPerChunk: tt.maxTxNum,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
@@ -337,8 +399,8 @@ func testChunkProposerCodecv1BlobSizeLimit(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,

View File

@@ -27,7 +27,7 @@ import (
func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
db := setupDB(t)
client, err := testApps.GetL1GethClient()
client, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)

View File

@@ -43,10 +43,10 @@ func setupEnv(t *testing.T) (err error) {
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL1GethContainer())
assert.NoError(t, testApps.StartPoSL1Container())
assert.NoError(t, testApps.StartL2GethContainer())
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL1GethEndPoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
@@ -109,10 +109,11 @@ func TestFunction(t *testing.T) {
t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
t.Run("TestChunkProposerCodecv1BlobSizeLimit", testChunkProposerCodecv1BlobSizeLimit)
// Run chunk proposer test cases.
// Run batch proposer test cases.
t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)
t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits)
t.Run("TestBatchCommitGasAndCalldataSizeEstimation", testBatchCommitGasAndCalldataSizeEstimation)
t.Run("TestBatchCommitGasAndCalldataSizeCodecv0Estimation", testBatchCommitGasAndCalldataSizeCodecv0Estimation)
t.Run("TestBatchCommitGasAndCalldataSizeCodecv1Estimation", testBatchCommitGasAndCalldataSizeCodecv1Estimation)
t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit)
}

View File

@@ -12,13 +12,12 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/database/migrate"
)
var (

View File

@@ -76,7 +76,6 @@ type ChunkMetrics struct {
CrcMax uint64
FirstBlockTimestamp uint64
// codecv0 metrics, default 0 for codecv1
L1CommitCalldataSize uint64
L1CommitGas uint64
@@ -108,6 +107,8 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
}
return metrics, nil
case encoding.CodecV1:
metrics.L1CommitGas = codecv1.EstimateChunkL1CommitGas(chunk)
metrics.L1CommitCalldataSize = codecv1.EstimateChunkL1CommitCalldataSize(chunk)
metrics.L1CommitBlobSize, err = codecv1.EstimateChunkL1CommitBlobSize(chunk)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
@@ -124,7 +125,6 @@ type BatchMetrics struct {
NumChunks uint64
FirstBlockTimestamp uint64
// codecv0 metrics, default 0 for codecv1
L1CommitCalldataSize uint64
L1CommitGas uint64
@@ -151,6 +151,8 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
}
return metrics, nil
case encoding.CodecV1:
metrics.L1CommitGas = codecv1.EstimateBatchL1CommitGas(batch)
metrics.L1CommitCalldataSize = codecv1.EstimateBatchL1CommitCalldataSize(batch)
metrics.L1CommitBlobSize, err = codecv1.EstimateBatchL1CommitBlobSize(batch)
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
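
After this change, codecv1 chunks and batches carry commit gas and calldata size estimates alongside the blob size instead of leaving them at zero. A usage sketch based only on the signatures visible above; the package name and logging are assumptions for illustration:

```go
package utils // placement assumed: alongside CalculateChunkMetrics

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/common/types/encoding"
)

// logCodecV1Estimates is illustration only: it surfaces the commit gas,
// calldata size and blob size that codecv1 metrics now populate.
func logCodecV1Estimates(chunk *encoding.Chunk) error {
	m, err := CalculateChunkMetrics(chunk, encoding.CodecV1)
	if err != nil {
		return fmt.Errorf("calculate chunk metrics: %w", err)
	}
	log.Info("codecv1 chunk commit estimate",
		"gas", m.L1CommitGas,
		"calldata", m.L1CommitCalldataSize,
		"blob", m.L1CommitBlobSize)
	return nil
}
```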

View File

@@ -14,7 +14,6 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/common/database"
dockercompose "scroll-tech/common/docker-compose/l1"
tc "scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
@@ -34,9 +33,8 @@ import (
)
var (
testApps *tc.TestcontainerApps
rollupApp *bcmd.MockApp
posL1TestEnv *dockercompose.PoSL1TestEnv
testApps *tc.TestcontainerApps
rollupApp *bcmd.MockApp
// clients
l1Client *ethclient.Client
@@ -72,9 +70,6 @@ func TestMain(m *testing.M) {
if rollupApp != nil {
rollupApp.Free()
}
if posL1TestEnv != nil {
posL1TestEnv.Stop()
}
}()
m.Run()
}
@@ -89,17 +84,13 @@ func setupEnv(t *testing.T) {
l1GethChainID *big.Int
)
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
assert.NoError(t, err, "failed to create PoS L1 test environment")
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
testApps = tc.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL1GethContainer())
assert.NoError(t, testApps.StartL2GethContainer())
assert.NoError(t, testApps.StartPoSL1Container())
rollupApp = bcmd.NewRollupApp(testApps, "../conf/config.json")
l1Client, err = posL1TestEnv.L1Client()
l1Client, err = testApps.GetPoSL1Client()
assert.NoError(t, err)
l2Client, err = testApps.GetL2GethClient()
assert.NoError(t, err)
@@ -114,8 +105,10 @@ func setupEnv(t *testing.T) {
l1Auth, err = bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, l1GethChainID)
assert.NoError(t, err)
rollupApp.Config.L1Config.Endpoint = posL1TestEnv.Endpoint()
rollupApp.Config.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
rollupApp.Config.L1Config.Endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
rollupApp.Config.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
port, err := rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err)

View File

@@ -221,16 +221,16 @@ func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxL1CommitGasPerChunk: 1000000,
MaxL1CommitCalldataSizePerChunk: 100000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
MaxL1CommitGasPerBatch: 1000000,
MaxL1CommitCalldataSizePerBatch: 100000,
BatchTimeoutSec: 300,
}, chainConfig, db, nil)

View File

@@ -43,7 +43,7 @@ func TestMain(m *testing.M) {
func setupEnv(t *testing.T) {
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL1GethContainer())
assert.NoError(t, testApps.StartPoSL1Container())
assert.NoError(t, testApps.StartL2GethContainer())
rollupApp = bcmd.NewRollupApp(testApps, "../../rollup/conf/config.json")
}