Compare commits


8 Commits

Author SHA1 Message Date
colin
c012f7132d feat(rollup-relayer): add sanity checks before committing and finalizing (#1714)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-08-11 17:49:29 +08:00
Jonas Theis
6897cc54bd feat(permissionless batches): batch production toolkit and operator recovery (#1555)
Signed-off-by: noelwei <fan@scroll.io>
Co-authored-by: Ömer Faruk Irmak <omerfirmak@gmail.com>
Co-authored-by: noelwei <fan@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Morty <70688412+yiweichi@users.noreply.github.com>
Co-authored-by: omerfirmak <omerfirmak@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: kunxian xia <xiakunxian130@gmail.com>
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Morty <yiweichi1@gmail.com>
2025-08-04 12:37:31 +08:00
georgehao
d21fa36803 change l2watcher from w.GetBlockByNumberOrHash to BlockByNumber (#1715) 2025-07-31 18:29:50 +08:00
colin
fc75299eb3 fix(gas-oracle): nonce too low when resubmission (#1712) 2025-07-30 14:50:41 +08:00
Morty
4bfcd35d0c fix(bridge-history): update dependency go-ethereum version (#1713) 2025-07-30 14:32:12 +08:00
colin
6d62f8e5fa fix(gas-oracle): typos in config file example (#1711) 2025-07-28 18:12:50 +08:00
Morty
392ae07736 feat(blob-uploader): support codec v8 (#1707) 2025-07-24 01:34:46 +08:00
colin
db80b47820 fix(rollup-relayer): upgrade boundary message queue hash initialization (#1706) 2025-07-23 18:51:56 +08:00
45 changed files with 2787 additions and 90 deletions

View File

@@ -11,14 +11,14 @@ require (
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c // It's a hotfix for the header hash incompatibility issue, pls change this with caution
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
require (
dario.cat/mergo v1.0.0 // indirect

View File

@@ -311,8 +311,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c h1:IpEBKM6O+xOK2qZVZztGxcobFXkKMb5hAkBEVzfXjVg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=

View File

@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*
permissionless-batches/conf/

View File

@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build rollup_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
ENTRYPOINT ["rollup_relayer"]

View File

@@ -0,0 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*
permissionless-batches/conf/

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.33"
var tag = "v4.5.39"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -1083,9 +1083,7 @@ github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v1.0.0/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
@@ -1122,7 +1120,6 @@ github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c
github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
github.com/iris-contrib/jade v1.1.4/go.mod h1:EDqR+ur9piDl6DUgs6qRrlfzmlx/D5UybogqrXvJTBE=
github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
@@ -1228,7 +1225,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
@@ -1240,7 +1236,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
@@ -1418,6 +1413,8 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@@ -1454,7 +1451,6 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spiffe/go-spiffe/v2 v2.1.1 h1:RT9kM8MZLZIsPTH+HKQEP5yaAk3yd/VBzlINaRjXs8k=
github.com/spiffe/go-spiffe/v2 v2.1.1/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
@@ -1481,7 +1477,6 @@ github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/a
github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo=
github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
@@ -1716,7 +1711,6 @@ golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5/go.mod h1:UBKtEnL8aqnd+0JHqZ+2qoMDwtuy6cYhhKNoHLBiTQc=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1741,11 +1735,9 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1811,7 +1803,6 @@ golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@@ -0,0 +1,22 @@
.PHONY: batch_production_submission launch_prover psql check_proving_status
export SCROLL_ZKVM_VERSION=0.4.2
PG_URL=postgres://postgres@localhost:5432/scroll
batch_production_submission:
docker compose --profile batch-production-submission up
launch_prover:
docker compose up -d
psql:
psql 'postgres://postgres@localhost:5432/scroll'
check_proving_status:
@echo "Checking proving status..."
@result=$$(psql "${PG_URL}" -t -c "SELECT proving_status = 4 AS is_status_success FROM batch ORDER BY index LIMIT 1;" | tr -d '[:space:]'); \
if [ "$$result" = "t" ]; then \
echo "✅ Prove succeeded! You're ready to submit permissionless batch and proof!"; \
else \
echo "Proof is not ready..."; \
fi

View File

@@ -0,0 +1,172 @@
# Permissionless Batches
Permissionless batches (also known as enforced batches) is a feature that guarantees users can exit Scroll even if the operator is down or censoring.
It allows anyone to take over and submit a batch (permissionless batch submission) together with a proof after a certain time period has passed without a batch being finalized on L1.
Once permissionless batch mode is activated, the operator can no longer submit batches in a permissioned way. Only the security council can deactivate permissionless batch mode and reinstate the operator as the only batch submitter.
There are two types of situations to consider:
- `Permissionless batch mode is activated:` This means that finalization has been halted for some time. Now anyone can submit batches utilizing the [batch production toolkit](#batch-production-toolkit).
- `Permissionless batch mode is deactivated:` This means that the security council has decided to reinstate the operator as the only batch submitter. The operator needs to [recover](#operator-recovery) the sequencer and relayer to resume batch submission and restore the valid L2 chain.
## Batch production toolkit
The batch production toolkit is a set of tools that allow anyone to submit a batch in permissionless mode. It consists of three main components:
1. l2geth state recovery from L1
2. l2geth block production
3. production, proving and submission of a batch with `docker-compose.yml`
### Prerequisites
- Unix-like OS, 32GB RAM
- Docker
- [l2geth](https://github.com/scroll-tech/go-ethereum/) or [Docker image](https://hub.docker.com/r/scrolltech/l2geth) of corresponding [version](https://docs.scroll.io/en/technology/overview/scroll-upgrades/).
- access to an Ethereum L1 RPC node (beacon node and execution client)
- ability to run a prover
- L1 account with funds to pay for the batch submission
### 1. l2geth state recovery from L1
Once permissionless mode is activated, no blocks are produced and propagated on L2. The first step is to recover the latest state of the L2 chain from L1. This is done by running l2geth in recovery mode.
Running l2geth in recovery mode requires the following configuration:
- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode
- `--da.blob.beaconnode` - L1 RPC beacon node
- `--l1.endpoint` - L1 RPC execution client
- `--da.sync=true` - enables syncing with L1
- `--da.recovery` - enables recovery mode
- `--da.recovery.initiall1block` - initial L1 block (commit tx of initial batch)
- `--da.recovery.initialbatch` - batch to start recovery from. Can be found on [Scrollscan Explorer](https://scrollscan.com/batches).
- `--da.recovery.l2endblock` - until which L2 block recovery should run (optional)
```bash
./build/bin/geth --scroll<-sepolia> \
--datadir "tmp/datadir" \
--gcmode archive \
--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \
--da.blob.beaconnode "<L1 RPC beacon node>" \
--l1.endpoint "<L1 RPC execution client>" \
--da.sync=true --da.recovery --da.recovery.initiall1block "<initial L1 block (commit tx of initial batch)>" --da.recovery.initialbatch "<batch where to start recovery from>" --da.recovery.l2endblock "<until which L2 block recovery should run (optional)>" \
--verbosity 3
```
### 2. l2geth block production
After the state is recovered, the next step is to produce blocks on L2. This is done by running l2geth in block production mode.
As a prerequisite, the state recovery must be completed and the latest state of the L2 chain must be available.
You also need to generate a keystore, e.g. with [Clef](https://geth.ethereum.org/docs/fundamentals/account-management), to be able to sign blocks.
This key does not hold any funds, but it is required for block production to work. Once you have generated blocks, you can safely discard it.
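For example (a sketch; any standard keystore-generation method works, Clef included), geth's built-in account manager can create such a throwaway key:
```bash
# Create a throwaway block-signing key in the datadir used below; it holds no funds.
./build/bin/geth account new --datadir "tmp/datadir"
```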
Running l2geth in block production mode requires the following configuration:
- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode
- `--da.blob.beaconnode` - L1 RPC beacon node
- `--l1.endpoint` - L1 RPC execution client
- `--da.sync=true` - enables syncing with L1
- `--da.recovery` - enables recovery mode
- `--da.recovery.produceblocks` - enables block production
- `--miner.etherbase '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' --mine` - enables mining. The address is not used, but it is required for mining to work.
- `--miner.gaslimit 1 --miner.gasprice 1 --miner.maxaccountsnum 100 --rpc.gascap 0 --gpo.ignoreprice 1` - gas settings for block production
```bash
./build/bin/geth --scroll<-sepolia> \
--datadir "tmp/datadir" \
--gcmode archive \
--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \
--da.blob.beaconnode "<L1 RPC beacon node>" \
--l1.endpoint "<L1 RPC execution client>" \
--da.sync=true --da.recovery --da.recovery.produceblocks \
--miner.gaslimit 1 --miner.gasprice 1 --miner.maxaccountsnum 100 --rpc.gascap 0 --gpo.ignoreprice 1 \
--miner.etherbase '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' --mine \
--verbosity 3
```
### 3. production, proving and submission of batch with `docker-compose.yml`
After the blocks are produced, the next step is to produce a batch, prove it and submit it to L1. This is done by running the `docker-compose.yml` in the `permissionless-batches` folder.
#### Producing a batch
To produce a batch you need to run the `batch-production-submission` profile in `docker-compose.yml`.
1. Fill `conf/genesis.json` with the latest genesis state from the L2 chain. The genesis for the current fork can be found [here](https://docs.scroll.io/en/technology/overview/scroll-upgrades/).
2. Make sure that `l2geth` with your locally produced blocks is running and reachable from the Docker network (e.g. `http://host.docker.internal:8545`).
3. Fill in the required fields in `conf/relayer/config.json`.
Run with `make batch_production_submission`.
This will produce chunks, a batch and a bundle, which will be proven in the next step.
`Success! You're ready to generate proofs!` indicates that everything is working correctly and the batch is ready to be proven.
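As an extra sanity check (a sketch; the connection string comes from the `Makefile` above, and the column names are assumed from the batch ORM), you can inspect the newly created batch directly in Postgres:
```bash
# Show the most recently created batch; proving_status is the same column the Makefile polls.
psql 'postgres://postgres@localhost:5432/scroll' \
  -c "SELECT index, hash, proving_status FROM batch ORDER BY index DESC LIMIT 1;"
```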
#### Proving a batch
To prove the chunk, batch and bundle you just generated, you need to run the `prover` profile in `docker-compose.yml`.
Local Proving:
1. Hardware requirements for the local prover: CPU: 36+ cores, 128 GB memory; GPU: 24 GB memory (e.g. RTX 3090/3090Ti/4090/A10/L4)
2. Make sure `verifier` and `high_version_circuit` in `conf/coordinator/config.json` are correct for the [latest fork](https://docs.scroll.io/en/technology/overview/scroll-upgrades/)
3. Set the `SCROLL_ZKVM_VERSION` environment variable in the `Makefile` to the correct [version](https://docs.scroll.io/en/technology/overview/scroll-upgrades/).
4. Fill in the required fields in `conf/proving-service/config.json`
Run with `make launch_prover`.
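Proving can take a while. The `Makefile` above also provides a `check_proving_status` target that polls the database and reports success once the proof has been verified:
```bash
make check_proving_status
# ✅ Prove succeeded! You're ready to submit permissionless batch and proof!
```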
#### Batch submission
To submit the batch you need to run the `batch-production-submission` profile in `docker-compose.yml`.
1. Fill in the required fields in `conf/relayer/config.json` for the sender config.
Run with `make batch_production_submission`.
This will submit the batch to L1 and finalize it. The transaction will be retried in case of failure.
**Troubleshooting**
- In case the submission fails, the calldata for the transaction is printed in an error message. You can use this with `cast call --trace --rpc-url "$SCROLL_L1_DEPLOYMENT_RPC" "$L1_SCROLL_CHAIN_PROXY_ADDR" <calldata>` to see what went wrong.
- `0x4df567b9: ErrorNotInEnforcedBatchMode`: permissionless batch mode is not activated, so you can't submit a batch.
- `0xa5d305cc: ErrorBatchIsEmpty`: no blob was provided. This is usually returned if you run the `cast call` while permissionless mode is activated but did not provide a blob in the transaction.
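As a quick cross-check, the selectors above can be recomputed with Foundry's `cast sig`, since a custom error's selector is the first 4 bytes of the keccak-256 hash of its signature (expected outputs taken from the list above):
```bash
cast sig "ErrorNotInEnforcedBatchMode()"  # expected: 0x4df567b9
cast sig "ErrorBatchIsEmpty()"            # expected: 0xa5d305cc
```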
## Operator recovery
Operator recovery needs to be run by the rollup operator to resume normal rollup operation after permissionless batch mode is deactivated. It consists of two main components:
1. l2geth recovery
2. Relayer recovery
These steps are required to resume permissioned batch submission and restore the valid L2 chain. They will restore the entire history of the batches submitted during permissionless mode.
### Prerequisites
- l2geth with the latest state of the L2 chain (before permissionless mode was activated)
- signer key for the sequencer according to Clique consensus
- relayer and coordinator are set up, running and up-to-date with the latest state of the L2 chain (before permissionless mode was activated)
### l2geth recovery
Running l2geth in recovery mode requires the following configuration:
- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode
- `--da.blob.beaconnode` - L1 RPC beacon node
- `--l1.endpoint` - L1 RPC execution client
- `--da.sync=true` - enables syncing with L1
- `--da.recovery` - enables recovery mode
- `--da.recovery.signblocks` - enables signing blocks with the configured sequencer key
- `--da.recovery.initiall1block` - initial L1 block (commit tx of initial batch)
- `--da.recovery.initialbatch` - batch to start recovery from. Can be found on [Scrollscan Explorer](https://scrollscan.com/batches).
- `--da.recovery.l2endblock` - until which L2 block recovery should run (optional)
```bash
./build/bin/geth --scroll<-sepolia> \
--datadir "tmp/datadir" \
--gcmode archive \
--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \
--da.blob.beaconnode "<L1 RPC beacon node>" \
--l1.endpoint "<L1 RPC execution client>" \
--da.sync=true --da.recovery --da.recovery.signblocks --da.recovery.initiall1block "<initial L1 block (commit tx of initial batch)>" --da.recovery.initialbatch "<batch where to start recovery from>" --da.recovery.l2endblock "<until which L2 block recovery should run (optional)>" \
--verbosity 3
```
After the recovery is finished, start the sequencer in normal operation and continue issuing L2 blocks as usual. This will resume the L2 chain, allow the relayer (after running its own recovery) to create new batches, and allow other L2 follower nodes to sync the valid and signed L2 chain.
### Relayer recovery
Start the relayer with the following additional top-level configuration:
```
"recovery_config": {
"enable": true
}
```
This will make the relayer recover all the chunks, batches and bundles that were submitted during permissionless mode. These batches are automatically marked as proven and finalized.
Once this process is finished, start the relayer normally without the recovery config to resume normal operation.
```
"recovery_config": {
"enable": false
}
```

View File

@@ -0,0 +1,30 @@
{
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 100,
"chunk_collection_time_sec": 36000,
"batch_collection_time_sec": 2700,
"bundle_collection_time_sec": 2700,
"verifier": {
"high_version_circuit" : {
"fork_name": "euclid",
"assets_path": "/verifier/openvm/verifier",
"min_prover_version": "v4.5.7"
}
}
},
"db": {
"driver_name": "postgres",
"dsn": "postgres://db/scroll?sslmode=disable&user=postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"l2": {
"chain_id": 333333
},
"auth": {
"secret": "e788b62d39254928a821ac1c76b274a8c835aa1e20ecfb6f50eb10e87847de44",
"challenge_expire_duration_sec": 10,
"login_expire_duration_sec": 3600
}
}

View File

@@ -0,0 +1,76 @@
#!/usr/bin/bash
apt update
apt install -y wget libdigest-sha-perl
# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
echo "SCROLL_ZKVM_VERSION not set"
exit 1
fi
if [ -z "${HTTP_PORT}" ]; then
echo "HTTP_PORT not set"
exit 1
fi
if [ -z "${METRICS_PORT}" ]; then
echo "METRICS_PORT not set"
exit 1
fi
case $CHAIN_ID in
"5343532222") # staging network
echo "staging network not supported"
exit 1
;;
"534353") # alpha network
echo "alpha network not supported"
exit 1
;;
esac
BASE_DOWNLOAD_DIR="/verifier"
# Ensure the base directory exists
mkdir -p "$BASE_DOWNLOAD_DIR"
# Set subdirectories
OPENVM_DIR="$BASE_DOWNLOAD_DIR/openvm"
# Create necessary directories
mkdir -p "$OPENVM_DIR/verifier"
# Define URLs for OpenVM files (No checksum verification)
OPENVM_URLS=(
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root-verifier-vm-config"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root-verifier-committed-exe"
)
# Download OpenVM files (No checksum verification, but skips if file exists)
for url in "${OPENVM_URLS[@]}"; do
dest_subdir="$OPENVM_DIR/$(basename $(dirname "$url"))"
mkdir -p "$dest_subdir"
filepath="$dest_subdir/$(basename "$url")"
echo "Downloading $filepath..."
curl -o "$filepath" -L "$url"
done
mkdir -p "$HOME/.openvm"
ln -s "$OPENVM_DIR/params" "$HOME/.openvm/params"
echo "All files downloaded successfully! 🎉"
mkdir -p /usr/local/bin
wget https://github.com/ethereum/solidity/releases/download/v0.8.19/solc-static-linux -O /usr/local/bin/solc
chmod +x /usr/local/bin/solc
# Start coordinator
echo "Starting coordinator api"
RUST_BACKTRACE=1 exec coordinator_api --config /coordinator/config.json \
--genesis /coordinator/genesis.json \
--http --http.addr "0.0.0.0" --http.port ${HTTP_PORT} \
--metrics --metrics.addr "0.0.0.0" --metrics.port ${METRICS_PORT} \
--log.debug

View File

@@ -0,0 +1 @@
<fill with correct genesis.json>

View File

@@ -0,0 +1,28 @@
{
"sdk_config": {
"prover_name_prefix": "local_prover",
"keys_dir": "/keys",
"db_path": "/db",
"coordinator": {
"base_url": "http://172.17.0.1:8556",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "<L2 RPC with generated blocks reachable from Docker network>"
},
"prover": {
"circuit_type": 2,
"supported_proof_types": [1,2,3],
"circuit_version": "v0.13.1"
},
"health_listener_addr": "0.0.0.0:89"
},
"circuits": {
"euclidV2": {
"hard_fork_name": "euclidV2",
"workspace_path": "/openvm"
}
}
}

View File

@@ -0,0 +1,54 @@
#!/usr/bin/bash
apt update
apt install -y wget curl
# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
echo "SCROLL_ZKVM_VERSION not set"
exit 1
fi
BASE_DOWNLOAD_DIR="/openvm"
# Ensure the base directory exists
mkdir -p "$BASE_DOWNLOAD_DIR"
# Define URLs for OpenVM files (No checksum verification)
OPENVM_URLS=(
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/app.vmexe"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/openvm.toml"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/app.vmexe"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/openvm.toml"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app.vmexe"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app_euclidv1.vmexe"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/openvm.toml"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/verifier.bin"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/verifier.sol"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/digest_1.hex"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/digest_2.hex"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/digest_1_euclidv1.hex"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/digest_2_euclidv1.hex"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/params/kzg_bn254_22.srs"
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/params/kzg_bn254_24.srs"
)
# Download OpenVM files (No checksum verification, but skips if file exists)
for url in "${OPENVM_URLS[@]}"; do
dest_subdir="$BASE_DOWNLOAD_DIR/$(basename $(dirname "$url"))"
mkdir -p "$dest_subdir"
filepath="$dest_subdir/$(basename "$url")"
echo "Downloading $filepath..."
curl -o "$filepath" -L "$url"
done
mkdir -p "$HOME/.openvm"
ln -s "/openvm/params" "$HOME/.openvm/params"
mkdir -p /usr/local/bin
wget https://github.com/ethereum/solidity/releases/download/v0.8.19/solc-static-linux -O /usr/local/bin/solc
chmod +x /usr/local/bin/solc
mkdir -p /openvm/cache
RUST_MIN_STACK=16777216 RUST_BACKTRACE=1 exec /prover/prover --config /prover/conf/config.json

View File

@@ -0,0 +1,49 @@
{
"l1_config": {
"endpoint": "<L1 RPC execution node>"
},
"l2_config": {
"confirmations": "0x0",
"endpoint": "<L2 RPC with generated blocks reachable from Docker network>",
"relayer_config": {
"commit_sender_signer_config": {
"signer_type": "PrivateKey",
"private_key_signer_config": {
"private_key": "<the private key of L1 address to submit permissionless batch, please fund it in advance>"
}
}
},
"chunk_proposer_config": {
"propose_interval_milliseconds": 100,
"max_block_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"chunk_timeout_sec": 300,
"max_uncompressed_batch_bytes_size": 4194304
},
"batch_proposer_config": {
"propose_interval_milliseconds": 1000,
"batch_timeout_sec": 300,
"max_chunks_per_batch": 45,
"max_uncompressed_batch_bytes_size": 4194304
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 20,
"bundle_timeout_sec": 36000
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://172.17.0.1:5432/scroll?sslmode=disable&user=postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"recovery_config": {
"enable": true,
"l1_block_height": "<commit tx of last finalized batch on L1>",
"latest_finalized_batch": "<last finalized batch on L1>",
"l2_block_height_limit": "<L2 block up to which to produce batch>",
"force_latest_finalized_batch": false,
"force_l1_message_count": 0,
"submit_without_proof": false
}
}

View File

@@ -0,0 +1,98 @@
name: permissionless-batches
services:
relayer-batch-production:
build:
context: ../
dockerfile: build/dockerfiles/recovery_permissionless_batches.Dockerfile
network_mode: host
container_name: permissionless-batches-relayer
volumes:
- ./conf/relayer/config.json:/app/conf/config.json
- ./conf/genesis.json:/app/conf/genesis.json
command: "--config /app/conf/config.json --min-codec-version 0"
profiles:
- batch-production-submission
depends_on:
db:
condition: service_healthy
db:
image: postgres:17.0
environment:
POSTGRES_HOST_AUTH_METHOD: trust
POSTGRES_USER: postgres
POSTGRES_DB: scroll
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U postgres" ]
interval: 1s
timeout: 1s
retries: 10
volumes:
- db_data:/var/lib/postgresql/data
ports:
- "5432:5432"
coordinator-api:
image: scrolltech/coordinator-api:v4.5.19
volumes:
- ./conf/coordinator/config.json:/coordinator/config.json:ro
- ./conf/genesis.json:/coordinator/genesis.json:ro
- ./conf/coordinator/coordinator_run.sh:/bin/coordinator_run.sh
entrypoint: /bin/coordinator_run.sh
profiles:
- local-prover
- cloud-prover
ports: ["8556:8555"]
environment:
- SCROLL_ZKVM_VERSION=${SCROLL_ZKVM_VERSION}
- SCROLL_PROVER_ASSETS_DIR=/verifier/assets/
- HTTP_PORT=8555
- METRICS_PORT=8390
depends_on:
db:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8555/coordinator/v1/challenge"]
interval: 1s
timeout: 1s
retries: 10
start_period: 5m
coordinator-cron:
build:
context: ../
dockerfile: build/dockerfiles/coordinator-cron.Dockerfile
volumes:
- ./conf/coordinator/config.json:/app/conf/config.json
command: "--config /app/conf/config.json --verbosity 3"
profiles:
- local-prover
- cloud-prover
depends_on:
db:
condition: service_healthy
local-prover:
image: scrolltech/cuda-prover:v4.5.12-97de9882-6ad5d8c-261b322
network_mode: host
platform: linux/amd64
runtime: nvidia
entrypoint: /bin/prover_run.sh
environment:
- SCROLL_ZKVM_VERSION=${SCROLL_ZKVM_VERSION}
- LD_LIBRARY_PATH=/prover:/usr/local/cuda/lib64
- RUST_MIN_STACK=16777216
- RUST_BACKTRACE=1
- RUST_LOG=info
volumes:
- ./conf/proving-service/config.json:/prover/conf/config.json:ro
- ./conf/proving-service/prover_run.sh:/bin/prover_run.sh
- ./conf/proving-service/db:/db
- ./conf/proving-service/keys:/keys
depends_on:
coordinator-api:
condition: service_healthy
volumes:
db_data:

View File

@@ -1,13 +1,12 @@
package bridgeabi
import (
"fmt"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/stretchr/testify/assert"
"github.com/scroll-tech/go-ethereum/common"
)
func TestPackCommitBatches(t *testing.T) {
@@ -92,20 +91,3 @@ func TestPackSetL2BaseFee(t *testing.T) {
_, err = l2GasOracleABI.Pack("setL2BaseFee", baseFee)
assert.NoError(err)
}
func TestPrintABISignatures(t *testing.T) {
// print all error signatures of ABI
abi, err := ScrollChainMetaData.GetAbi()
if err != nil {
t.Fatal(err)
}
for _, method := range abi.Methods {
fmt.Println(hexutil.Encode(method.ID[:4]), method.Sig, method.Name)
}
fmt.Println("------------------------------")
for _, errors := range abi.Errors {
fmt.Println(hexutil.Encode(errors.ID[:4]), errors.Sig, errors.Name)
}
}

View File

@@ -0,0 +1,144 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/da-codec/encoding"
"github.com/urfave/cli/v2"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/permissionless_batches"
"scroll-tech/rollup/internal/controller/watcher"
)
var app *cli.App
func init() {
// Set up rollup-relayer app info.
app = cli.NewApp()
app.Action = action
app.Name = "permissionless-batches"
app.Usage = "The Scroll Rollup Relayer for permissionless batch production"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
defer cancel()
// Sanity check config. Make sure the required fields are set.
if cfg.RecoveryConfig == nil {
return fmt.Errorf("recovery config must be specified")
}
if cfg.RecoveryConfig.L1BeaconNodeEndpoint == "" {
return fmt.Errorf("L1 beacon node endpoint must be specified")
}
if cfg.RecoveryConfig.L1BlockHeight == 0 {
return fmt.Errorf("L1 block height must be specified")
}
if cfg.RecoveryConfig.LatestFinalizedBatch == 0 {
return fmt.Errorf("latest finalized batch must be specified")
}
// init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
if err = database.CloseDB(db); err != nil {
log.Crit("failed to close db connection", "error", err)
}
}()
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, false, registry)
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
return fmt.Errorf("failed to connect to L2geth at RPC=%s: %w", cfg.L2Config.Endpoint, err)
}
l2Watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)
recovery := permissionless_batches.NewRecovery(subCtx, cfg, genesis, db, chunkProposer, batchProposer, bundleProposer, l2Watcher)
if recovery.RecoveryNeeded() {
if err = recovery.Run(); err != nil {
return fmt.Errorf("failed to run recovery: %w", err)
}
log.Info("Success! You're ready to generate proofs!")
} else {
log.Info("No recovery needed, submitting batch and proof to L1...")
submitter, err := permissionless_batches.NewSubmitter(subCtx, db, cfg.L2Config.RelayerConfig, genesis.Config)
if err != nil {
return fmt.Errorf("failed to create submitter: %w", err)
}
if err = submitter.Submit(!cfg.RecoveryConfig.SubmitWithoutProof); err != nil {
return fmt.Errorf("failed to submit batch: %w", err)
}
log.Info("Transaction submitted to L1, waiting for confirmation...")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
select {
case <-subCtx.Done():
case confirmation := <-submitter.Sender().ConfirmChan():
if confirmation.IsSuccessful {
log.Info("Transaction confirmed on L1, your permissionless batch is part of the ledger!", "tx hash", confirmation.TxHash)
}
case <-interrupt:
log.Info("CTRL-C received, shutting down...")
}
}
return nil
}
// Run rollup relayer cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/rollup/cmd/permissionless_batches/app"
func main() {
app.Run()
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/l1"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
@@ -112,6 +113,32 @@ func action(ctx *cli.Context) error {
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)
if cfg.RecoveryConfig != nil && cfg.RecoveryConfig.Enable {
log.Info("Starting rollup-relayer in recovery mode", "version", version.Version)
l1Client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
return fmt.Errorf("failed to connect to L1 client: %w", err)
}
reader, err := l1.NewReader(context.Background(), l1.Config{
ScrollChainAddress: genesis.Config.Scroll.L1Config.ScrollChainAddress,
L1MessageQueueAddress: genesis.Config.Scroll.L1Config.L1MessageQueueAddress,
}, l1Client)
if err != nil {
return fmt.Errorf("failed to create L1 reader: %w", err)
}
fullRecovery, err := relayer.NewFullRecovery(subCtx, cfg, genesis, db, chunkProposer, batchProposer, bundleProposer, l2watcher, l1Client, reader)
if err != nil {
return fmt.Errorf("failed to create full recovery: %w", err)
}
if err = fullRecovery.RestoreFullPreviousState(); err != nil {
log.Crit("failed to restore full previous state", "error", err)
}
return nil
}
// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
@@ -119,7 +146,8 @@ func action(ctx *cli.Context) error {
log.Error("failed to get block number", "err", loopErr)
return
}
l2watcher.TryFetchRunningMissingBlocks(number)
// errors are logged in the try method as well
_ = l2watcher.TryFetchRunningMissingBlocks(number)
})
go utils.Loop(subCtx, time.Duration(cfg.L2Config.ChunkProposerConfig.ProposeIntervalMilliseconds)*time.Millisecond, chunkProposer.TryProposeChunk)

View File

@@ -3,7 +3,7 @@
"endpoint": "https://rpc.ankr.com/eth",
"start_height": 0,
"relayer_config": {
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_contract_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://rpc.scroll.io",
"escalate_blocks": 1,
@@ -38,7 +38,6 @@
"relayer_config": {
"validium_mode": false,
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://rpc.ankr.com/eth",
"escalate_blocks": 1,

View File

@@ -18,9 +18,10 @@ import (
// Config load configuration items.
type Config struct {
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *database.Config `json:"db_config"`
L1Config *L1Config `json:"l1_config"`
L2Config *L2Config `json:"l2_config"`
DBConfig *database.Config `json:"db_config"`
RecoveryConfig *RecoveryConfig `json:"recovery_config"`
}
type ConfigForReplay struct {

View File

@@ -8,4 +8,7 @@ type L1Config struct {
StartHeight uint64 `json:"start_height"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
// beacon node url
BeaconNodeEndpoint string `json:"beacon_node_endpoint"`
}

View File

@@ -0,0 +1,14 @@
package config
type RecoveryConfig struct {
Enable bool `json:"enable"`
L1BeaconNodeEndpoint string `json:"l1_beacon_node_endpoint"` // the L1 beacon node endpoint to connect to
L1BlockHeight uint64 `json:"l1_block_height"` // the L1 block height to start recovery from
LatestFinalizedBatch uint64 `json:"latest_finalized_batch"` // the latest finalized batch number
L2BlockHeightLimit uint64 `json:"l2_block_height_limit"` // L2 block up to which to produce batch
ForceLatestFinalizedBatch bool `json:"force_latest_finalized_batch"` // whether to force usage of the latest finalized batch - mainly used for testing
ForceL1MessageCount uint64 `json:"force_l1_message_count"` // force the number of L1 messages, useful for testing
SubmitWithoutProof bool `json:"submit_without_proof"` // whether to submit batches without proof, useful for testing
}

View File

@@ -167,7 +167,7 @@ func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, er
Chunks: chunks,
}
case encoding.CodecV7:
case encoding.CodecV7, encoding.CodecV8:
encodingBatch = &encoding.Batch{
Index: dbBatch.Index,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),

View File

@@ -0,0 +1,458 @@
package permissionless_batches
import (
"context"
"fmt"
"github.com/scroll-tech/da-codec/encoding"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"github.com/scroll-tech/go-ethereum/rollup/l1"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/watcher"
"scroll-tech/rollup/internal/orm"
)
const (
// defaultFakeRestoredChunkIndex is the default index of the last restored fake chunk. It lets us generate new chunks while pretending that we have already processed some chunks.
defaultFakeRestoredChunkIndex uint64 = 1337
// defaultFakeRestoredBundleIndex is the default index of the last restored fake bundle. It lets us generate new bundles while pretending that we have already processed some bundles.
defaultFakeRestoredBundleIndex uint64 = 1
)
type MinimalRecovery struct {
ctx context.Context
cfg *config.Config
genesis *core.Genesis
db *gorm.DB
chunkORM *orm.Chunk
batchORM *orm.Batch
bundleORM *orm.Bundle
chunkProposer *watcher.ChunkProposer
batchProposer *watcher.BatchProposer
bundleProposer *watcher.BundleProposer
l2Watcher *watcher.L2WatcherClient
}
func NewRecovery(ctx context.Context, cfg *config.Config, genesis *core.Genesis, db *gorm.DB, chunkProposer *watcher.ChunkProposer, batchProposer *watcher.BatchProposer, bundleProposer *watcher.BundleProposer, l2Watcher *watcher.L2WatcherClient) *MinimalRecovery {
return &MinimalRecovery{
ctx: ctx,
cfg: cfg,
genesis: genesis,
db: db,
chunkORM: orm.NewChunk(db),
batchORM: orm.NewBatch(db),
bundleORM: orm.NewBundle(db),
chunkProposer: chunkProposer,
batchProposer: batchProposer,
bundleProposer: bundleProposer,
l2Watcher: l2Watcher,
}
}
func (r *MinimalRecovery) RecoveryNeeded() bool {
chunk, err := r.chunkORM.GetLatestChunk(r.ctx)
if err != nil || chunk == nil {
return true
}
if chunk.Index <= defaultFakeRestoredChunkIndex {
return true
}
batch, err := r.batchORM.GetLatestBatch(r.ctx)
if err != nil {
return true
}
if batch.Index <= r.cfg.RecoveryConfig.LatestFinalizedBatch {
return true
}
bundle, err := r.bundleORM.GetLatestBundle(r.ctx)
if err != nil {
return true
}
if bundle.Index <= defaultFakeRestoredBundleIndex {
return true
}
return false
}
func (r *MinimalRecovery) Run() error {
// Make sure we start from a clean state.
if err := r.resetDB(); err != nil {
return fmt.Errorf("failed to reset DB: %w", err)
}
// Restore minimal previous state required to be able to create new chunks, batches and bundles.
restoredFinalizedChunk, restoredFinalizedBatch, restoredFinalizedBundle, err := r.restoreMinimalPreviousState()
if err != nil {
return fmt.Errorf("failed to restore minimal previous state: %w", err)
}
// Fetch and insert the missing blocks from the last block in the latestFinalizedBatch to the latest L2 block.
fromBlock := restoredFinalizedChunk.EndBlockNumber
toBlock, err := r.fetchL2Blocks(fromBlock, r.cfg.RecoveryConfig.L2BlockHeightLimit)
if err != nil {
return fmt.Errorf("failed to fetch L2 blocks: %w", err)
}
// Create chunks for L2 blocks.
log.Info("Creating chunks for L2 blocks", "from", fromBlock, "to", toBlock)
var latestChunk *orm.Chunk
var count int
for {
if err = r.chunkProposer.ProposeChunk(); err != nil {
return fmt.Errorf("failed to propose chunk: %w", err)
}
count++
latestChunk, err = r.chunkORM.GetLatestChunk(r.ctx)
if err != nil {
return fmt.Errorf("failed to get latest latestFinalizedChunk: %w", err)
}
log.Info("Chunk created", "index", latestChunk.Index, "hash", latestChunk.Hash, "StartBlockNumber", latestChunk.StartBlockNumber, "EndBlockNumber", latestChunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", latestChunk.TotalL1MessagesPoppedBefore)
// We have created chunks for all available L2 blocks.
if latestChunk.EndBlockNumber >= toBlock {
break
}
}
log.Info("Chunks created", "count", count, "latest Chunk", latestChunk.Index, "hash", latestChunk.Hash, "StartBlockNumber", latestChunk.StartBlockNumber, "EndBlockNumber", latestChunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", latestChunk.TotalL1MessagesPoppedBefore, "PrevL1MessageQueueHash", latestChunk.PrevL1MessageQueueHash, "PostL1MessageQueueHash", latestChunk.PostL1MessageQueueHash)
// Create a batch for the created chunks. We only allow 1 batch, as it needs to be submitted (and finalized) with a proof in a single step.
log.Info("Creating batch for chunks", "from", restoredFinalizedChunk.Index+1, "to", latestChunk.Index)
r.batchProposer.TryProposeBatch()
latestBatch, err := r.batchORM.GetLatestBatch(r.ctx)
if err != nil {
return fmt.Errorf("failed to get latest latestFinalizedBatch: %w", err)
}
// Sanity check that the batch was created correctly:
// 1. should be a new batch
// 2. should contain all chunks created
if restoredFinalizedBatch.Index+1 != latestBatch.Index {
return fmt.Errorf("batch was not created correctly, expected %d but got %d", restoredFinalizedBatch.Index+1, latestBatch.Index)
}
firstChunkInBatch, err := r.chunkORM.GetChunkByIndex(r.ctx, latestBatch.StartChunkIndex)
if err != nil {
return fmt.Errorf("failed to get first chunk in batch: %w", err)
}
lastChunkInBatch, err := r.chunkORM.GetChunkByIndex(r.ctx, latestBatch.EndChunkIndex)
if err != nil {
return fmt.Errorf("failed to get last chunk in batch: %w", err)
}
// Make sure that the batch contains all previously created chunks and thus all blocks. If not, the user will need to
// produce another batch (running the application again) starting from the end block of the last chunk in the batch + 1.
if latestBatch.EndChunkIndex != latestChunk.Index {
log.Warn("Produced batch does not contain all chunks and blocks. You'll need to produce another batch starting from end block+1.", "starting block", firstChunkInBatch.StartBlockNumber, "end block", lastChunkInBatch.EndBlockNumber, "latest block", latestChunk.EndBlockNumber)
}
log.Info("Batch created", "index", latestBatch.Index, "hash", latestBatch.Hash, "StartChunkIndex", latestBatch.StartChunkIndex, "EndChunkIndex", latestBatch.EndChunkIndex, "starting block", firstChunkInBatch.StartBlockNumber, "ending block", lastChunkInBatch.EndBlockNumber, "PrevL1MessageQueueHash", latestBatch.PrevL1MessageQueueHash, "PostL1MessageQueueHash", latestBatch.PostL1MessageQueueHash)
if err = r.bundleProposer.UpdateDBBundleInfo([]*orm.Batch{latestBatch}, encoding.CodecVersion(latestBatch.CodecVersion)); err != nil {
return fmt.Errorf("failed to create bundle: %w", err)
}
latestBundle, err := r.bundleORM.GetLatestBundle(r.ctx)
if err != nil {
return fmt.Errorf("failed to get latest bundle: %w", err)
}
// Sanity check that the bundle was created correctly:
// 1. should be a new bundle
// 2. should only contain 1 batch, the one we created
if restoredFinalizedBundle.Index == latestBundle.Index {
return fmt.Errorf("bundle was not created correctly")
}
if latestBundle.StartBatchIndex != latestBatch.Index || latestBundle.EndBatchIndex != latestBatch.Index {
return fmt.Errorf("bundle does not contain the correct batch: %d != %d", latestBundle.StartBatchIndex, latestBatch.Index)
}
log.Info("Bundle created", "index", latestBundle.Index, "hash", latestBundle.Hash, "StartBatchIndex", latestBundle.StartBatchIndex, "EndBatchIndex", latestBundle.EndBatchIndex, "starting block", firstChunkInBatch.StartBlockNumber, "ending block", lastChunkInBatch.EndBlockNumber)
return nil
}
// restoreMinimalPreviousState restores the minimal previous state required to be able to create new chunks, batches and bundles.
func (r *MinimalRecovery) restoreMinimalPreviousState() (*orm.Chunk, *orm.Batch, *orm.Bundle, error) {
log.Info("Restoring previous state with", "L1 block height", r.cfg.RecoveryConfig.L1BlockHeight, "latest finalized batch", r.cfg.RecoveryConfig.LatestFinalizedBatch)
l1Client, err := ethclient.Dial(r.cfg.L1Config.Endpoint)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to connect to L1 client: %w", err)
}
reader, err := l1.NewReader(r.ctx, l1.Config{
ScrollChainAddress: r.genesis.Config.Scroll.L1Config.ScrollChainAddress,
L1MessageQueueAddress: r.genesis.Config.Scroll.L1Config.L1MessageQueueV2Address,
}, l1Client)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to create L1 reader: %w", err)
}
// 1. Sanity check user input: Make sure that the user's L1 block height is not higher than the latest finalized block number.
latestFinalizedL1Block, err := reader.GetLatestFinalizedBlockNumber()
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to get latest finalized L1 block number: %w", err)
}
if r.cfg.RecoveryConfig.L1BlockHeight > latestFinalizedL1Block {
return nil, nil, nil, fmt.Errorf("specified L1 block height is higher than the latest finalized block number: %d > %d", r.cfg.RecoveryConfig.L1BlockHeight, latestFinalizedL1Block)
}
log.Info("Latest finalized L1 block number", "latest finalized L1 block", latestFinalizedL1Block)
// 2. Make sure that the specified batch is indeed finalized on the L1 rollup contract and is the latest finalized batch.
var latestFinalizedBatchIndex uint64
if r.cfg.RecoveryConfig.ForceLatestFinalizedBatch {
latestFinalizedBatchIndex = r.cfg.RecoveryConfig.LatestFinalizedBatch
} else {
latestFinalizedBatchIndex, err = reader.LatestFinalizedBatchIndex(latestFinalizedL1Block)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to get latest finalized batch: %w", err)
}
if r.cfg.RecoveryConfig.LatestFinalizedBatch != latestFinalizedBatchIndex {
return nil, nil, nil, fmt.Errorf("batch %d is not the latest finalized batch: %d", r.cfg.RecoveryConfig.LatestFinalizedBatch, latestFinalizedBatchIndex)
}
}
// Find the commit event for the latest finalized batch.
var batchCommitEvent *l1.CommitBatchEvent
err = reader.FetchRollupEventsInRangeWithCallback(r.cfg.RecoveryConfig.L1BlockHeight, latestFinalizedL1Block, func(event l1.RollupEvent) bool {
if event.Type() == l1.CommitEventType && event.BatchIndex().Uint64() == latestFinalizedBatchIndex {
batchCommitEvent = event.(*l1.CommitBatchEvent)
// We found the commit event for the batch, stop searching.
return false
}
// Continue until we find the commit event for the batch.
return true
})
if batchCommitEvent == nil {
return nil, nil, nil, fmt.Errorf("commit event not found for batch %d", latestFinalizedBatchIndex)
}
log.Info("Found commit event for batch", "batch", batchCommitEvent.BatchIndex(), "hash", batchCommitEvent.BatchHash(), "L1 block height", batchCommitEvent.BlockNumber(), "L1 tx hash", batchCommitEvent.TxHash())
// 3. Fetch commit tx data for latest finalized batch and decode it.
daBatch, daBlobPayload, err := r.decodeLatestFinalizedBatch(reader, batchCommitEvent)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to decode latest finalized batch: %w", err)
}
fmt.Println(daBatch, daBlobPayload)
blocksInBatch := daBlobPayload.Blocks()
if len(blocksInBatch) == 0 {
return nil, nil, nil, fmt.Errorf("no blocks in batch %d", batchCommitEvent.BatchIndex())
}
lastBlockInBatch := blocksInBatch[len(blocksInBatch)-1]
log.Info("Last L2 block in batch", "batch", batchCommitEvent.BatchIndex(), "L2 block", lastBlockInBatch, "PostL1MessageQueueHash", daBlobPayload.PostL1MessageQueueHash())
// 4. Get the L1 messages count and state root after the latest finalized batch.
var l1MessagesCount uint64
if r.cfg.RecoveryConfig.ForceL1MessageCount == 0 {
l1MessagesCount, err = reader.NextUnfinalizedL1MessageQueueIndex(latestFinalizedL1Block)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to get L1 messages count: %w", err)
}
} else {
l1MessagesCount = r.cfg.RecoveryConfig.ForceL1MessageCount
}
log.Info("L1 messages count after latest finalized batch", "batch", batchCommitEvent.BatchIndex(), "count", l1MessagesCount)
stateRoot, err := reader.GetFinalizedStateRootByBatchIndex(latestFinalizedL1Block, latestFinalizedBatchIndex)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to get state root: %w", err)
}
log.Info("State root after latest finalized batch", "batch", batchCommitEvent.BatchIndex(), "stateRoot", stateRoot.Hex())
// 5. Insert minimal state to DB.
chunk, err := r.chunkORM.InsertPermissionlessChunk(r.ctx, defaultFakeRestoredChunkIndex, daBatch.Version(), daBlobPayload, l1MessagesCount, stateRoot)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to insert chunk raw: %w", err)
}
log.Info("Inserted last finalized chunk to DB", "chunk", chunk.Index, "hash", chunk.Hash, "StartBlockNumber", chunk.StartBlockNumber, "EndBlockNumber", chunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", chunk.TotalL1MessagesPoppedBefore)
batch, err := r.batchORM.InsertPermissionlessBatch(r.ctx, batchCommitEvent.BatchIndex(), batchCommitEvent.BatchHash(), daBatch.Version(), chunk)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to insert batch raw: %w", err)
}
log.Info("Inserted last finalized batch to DB", "batch", batch.Index, "hash", batch.Hash)
var bundle *orm.Bundle
err = r.db.Transaction(func(dbTX *gorm.DB) error {
bundle, err = r.bundleORM.InsertBundle(r.ctx, []*orm.Batch{batch}, encoding.CodecVersion(batch.CodecVersion), dbTX)
if err != nil {
return fmt.Errorf("failed to insert bundle: %w", err)
}
if err = r.bundleORM.UpdateProvingStatus(r.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update proving status: %w", err)
}
if err = r.bundleORM.UpdateRollupStatus(r.ctx, bundle.Hash, types.RollupFinalized, dbTX); err != nil {
return fmt.Errorf("failed to update rollup status: %w", err)
}
log.Info("Inserted last finalized bundle to DB", "bundle", bundle.Index, "hash", bundle.Hash, "StartBatchIndex", bundle.StartBatchIndex, "EndBatchIndex", bundle.EndBatchIndex)
return nil
})
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to insert bundle: %w", err)
}
return chunk, batch, bundle, nil
}
func (r *MinimalRecovery) decodeLatestFinalizedBatch(reader *l1.Reader, event *l1.CommitBatchEvent) (encoding.DABatch, encoding.DABlobPayload, error) {
blockHeader, err := reader.FetchBlockHeaderByNumber(event.BlockNumber())
if err != nil {
return nil, nil, fmt.Errorf("failed to get header by number, err: %w", err)
}
args, err := reader.FetchCommitTxData(event)
if err != nil {
return nil, nil, fmt.Errorf("failed to fetch commit tx data: %w", err)
}
codecVersion := encoding.CodecVersion(args.Version)
if codecVersion < encoding.CodecV7 {
return nil, nil, fmt.Errorf("codec version %d is not supported", codecVersion)
}
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, nil, fmt.Errorf("failed to get codec: %w", err)
}
// Since we only store the last batch hash committed in a single tx in the contracts, we can also only ever
// finalize the last batch of a tx. This means we can assume here that the batch given in the event is the
// last batch that was committed in the tx.
if event.BatchIndex().Uint64()+1 < uint64(len(args.BlobHashes)) {
return nil, nil, fmt.Errorf("batch index %d+1 is lower than the number of blobs %d", event.BatchIndex().Uint64(), len(args.BlobHashes))
}
firstBatchIndex := event.BatchIndex().Uint64() + 1 - uint64(len(args.BlobHashes))
var targetBatch encoding.DABatch
var targetBlobVersionedHash common.Hash
parentBatchHash := args.ParentBatchHash
for i, blobVersionedHash := range args.BlobHashes {
batchIndex := firstBatchIndex + uint64(i)
calculatedBatch, err := codec.NewDABatchFromParams(batchIndex, blobVersionedHash, parentBatchHash)
if err != nil {
return nil, nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex().Uint64(), err)
}
parentBatchHash = calculatedBatch.Hash()
if batchIndex == event.BatchIndex().Uint64() {
if calculatedBatch.Hash() != event.BatchHash() {
return nil, nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex(), event.BatchHash().String(), calculatedBatch.Hash().String())
}
// We found the batch we are looking for, break out of the loop.
targetBatch = calculatedBatch
targetBlobVersionedHash = blobVersionedHash
break
}
}
if targetBatch == nil {
return nil, nil, fmt.Errorf("target batch with index %d could not be found and decoded", event.BatchIndex())
}
// sanity check that this is indeed the last batch in the tx
if targetBatch.Hash() != args.LastBatchHash {
return nil, nil, fmt.Errorf("last batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex(), args.LastBatchHash.String(), targetBatch.Hash().String())
}
// TODO: add support for multiple blob clients
blobClient := blob_client.NewBlobClients()
if r.cfg.RecoveryConfig.L1BeaconNodeEndpoint != "" {
client, err := blob_client.NewBeaconNodeClient(r.cfg.RecoveryConfig.L1BeaconNodeEndpoint)
if err != nil {
return nil, nil, fmt.Errorf("failed to create beacon node client: %w", err)
}
blobClient.AddBlobClient(client)
}
log.Info("Fetching blob by versioned hash and block time", "TargetBlobVersionedHash", targetBlobVersionedHash, "BlockTime", blockHeader.Time, "BlockNumber", blockHeader.Number)
blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(r.ctx, targetBlobVersionedHash, blockHeader.Time)
if err != nil {
return nil, nil, fmt.Errorf("failed to get blob by versioned hash and block time for batch %d: %w", event.BatchIndex(), err)
}
daBlobPayload, err := codec.DecodeBlob(blob)
if err != nil {
return nil, nil, fmt.Errorf("failed to decode blob for batch %d: %w", event.BatchIndex(), err)
}
return targetBatch, daBlobPayload, nil
}
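// deriveBatchIndicesExample is an illustrative sketch of the index arithmetic used above
// (it is not called by the recovery flow): since the event's batch is the last batch in the
// commit tx, the first batch index in the tx is event index + 1 - number of blobs, and the
// indices increase by one per blob. For example, deriveBatchIndicesExample(102, 3) returns
// [100 101 102].
func deriveBatchIndicesExample(eventBatchIndex uint64, numBlobs int) []uint64 {
indices := make([]uint64, numBlobs)
firstBatchIndex := eventBatchIndex + 1 - uint64(numBlobs)
for i := range indices {
indices[i] = firstBatchIndex + uint64(i)
}
return indices
}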
func (r *MinimalRecovery) fetchL2Blocks(fromBlock uint64, l2BlockHeightLimit uint64) (uint64, error) {
if l2BlockHeightLimit > 0 && fromBlock > l2BlockHeightLimit {
return 0, fmt.Errorf("fromBlock (latest finalized L2 block) is higher than specified L2BlockHeightLimit: %d > %d", fromBlock, l2BlockHeightLimit)
}
log.Info("Fetching L2 blocks with", "fromBlock", fromBlock, "l2BlockHeightLimit", l2BlockHeightLimit)
// Fetch and insert the missing blocks from the last block in the batch to the latest L2 block.
latestL2Block, err := r.l2Watcher.Client.BlockNumber(r.ctx)
if err != nil {
return 0, fmt.Errorf("failed to get latest L2 block number: %w", err)
}
log.Info("Latest L2 block number", "latest L2 block", latestL2Block)
if l2BlockHeightLimit > latestL2Block {
return 0, fmt.Errorf("l2BlockHeightLimit is higher than the latest L2 block number, not all blocks are available in L2geth: %d > %d", l2BlockHeightLimit, latestL2Block)
}
toBlock := latestL2Block
if l2BlockHeightLimit > 0 {
toBlock = l2BlockHeightLimit
}
err = r.l2Watcher.GetAndStoreBlocks(r.ctx, fromBlock, toBlock)
if err != nil {
return 0, fmt.Errorf("failed to get and store blocks: %w", err)
}
log.Info("Fetched L2 blocks from", "fromBlock", fromBlock, "toBlock", toBlock)
return toBlock, nil
}
func (r *MinimalRecovery) resetDB() error {
sqlDB, err := r.db.DB()
if err != nil {
return fmt.Errorf("failed to get db connection: %w", err)
}
if err = migrate.ResetDB(sqlDB); err != nil {
return fmt.Errorf("failed to reset db: %w", err)
}
return nil
}

View File

@@ -0,0 +1,265 @@
package permissionless_batches
import (
"context"
"errors"
"fmt"
"math/big"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/da-codec/encoding"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/sender"
"scroll-tech/rollup/internal/orm"
)
type Submitter struct {
ctx context.Context
db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
bundleOrm *orm.Bundle
cfg *config.RelayerConfig
finalizeSender *sender.Sender
l1RollupABI *abi.ABI
chainCfg *params.ChainConfig
}
func NewSubmitter(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig) (*Submitter, error) {
registry := prometheus.DefaultRegisterer
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderSignerConfig, "permissionless_batches_submitter", "finalize_sender", types.SenderTypeFinalizeBatch, db, registry)
if err != nil {
return nil, fmt.Errorf("new finalize sender failed, err: %w", err)
}
return &Submitter{
ctx: ctx,
db: db,
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
cfg: cfg,
finalizeSender: finalizeSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
chainCfg: chainCfg,
}, nil
}
func (s *Submitter) Sender() *sender.Sender {
return s.finalizeSender
}
func (s *Submitter) Submit(withProof bool) error {
// Check if the bundle is already finalized
bundle, err := s.bundleOrm.GetLatestBundle(s.ctx)
if err != nil {
return fmt.Errorf("error loading latest bundle: %w", err)
}
if bundle.Index != defaultFakeRestoredBundleIndex+1 {
return fmt.Errorf("unexpected bundle index %d with hash %s, expected %d", bundle.Index, bundle.Hash, defaultFakeRestoredBundleIndex+1)
}
if types.RollupStatus(bundle.RollupStatus) == types.RollupFinalized {
return fmt.Errorf("bundle %d %s is already finalized. nothing to do", bundle.Index, bundle.Hash)
}
if bundle.StartBatchIndex != bundle.EndBatchIndex {
return fmt.Errorf("bundle %d %s has unexpected batch indices (should only contain a single batch): start %d, end %d", bundle.Index, bundle.Hash, bundle.StartBatchIndex, bundle.EndBatchIndex)
}
if bundle.StartBatchHash != bundle.EndBatchHash {
return fmt.Errorf("bundle %d %s has unexpected batch hashes (should only contain a single batch): start %s, end %s", bundle.Index, bundle.Hash, bundle.StartBatchHash, bundle.EndBatchHash)
}
batch, err := s.batchOrm.GetBatchByIndex(s.ctx, bundle.StartBatchIndex)
if err != nil {
return fmt.Errorf("failed to load batch %d: %w", bundle.StartBatchIndex, err)
}
if batch == nil {
return fmt.Errorf("batch %d not found", bundle.StartBatchIndex)
}
if batch.Hash != bundle.StartBatchHash {
return fmt.Errorf("bundle %d %s has unexpected batch hash: %s", bundle.Index, bundle.Hash, batch.Hash)
}
log.Info("submitting batch", "index", batch.Index, "hash", batch.Hash)
endChunk, err := s.chunkOrm.GetChunkByIndex(s.ctx, batch.EndChunkIndex)
if err != nil || endChunk == nil {
return fmt.Errorf("failed to get end chunk with index %d of batch: %w", batch.EndChunkIndex, err)
}
var aggProof *message.OpenVMBundleProof
if withProof {
firstChunk, err := s.chunkOrm.GetChunkByIndex(s.ctx, batch.StartChunkIndex)
if err != nil || firstChunk == nil {
return fmt.Errorf("failed to get first chunk %d of batch: %w", batch.StartChunkIndex, err)
}
aggProof, err = s.bundleOrm.GetVerifiedProofByHash(s.ctx, bundle.Hash)
if err != nil {
return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err)
}
if err = aggProof.SanityCheck(); err != nil {
return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", bundle.Index, err)
}
}
var calldata []byte
var blob *kzg4844.Blob
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7:
calldata, blob, err = s.constructCommitAndFinalizeCalldataAndBlob(batch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct CommitAndFinalize calldata and blob, bundle index: %v, batch index: %v, err: %w", bundle.Index, batch.Index, err)
}
default:
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
}
txHash, _, err := s.finalizeSender.SendTransaction("commitAndFinalize-"+bundle.Hash, &s.cfg.RollupContractAddress, calldata, []*kzg4844.Blob{blob})
if err != nil {
log.Error("commitAndFinalize in layer1 failed", "with proof", withProof, "index", bundle.Index,
"batch index", bundle.StartBatchIndex,
"RollupContractAddress", s.cfg.RollupContractAddress, "err", err, "calldata", common.Bytes2Hex(calldata))
var rpcError rpc.DataError
if errors.As(err, &rpcError) {
log.Error("rpc.DataError ", "error", rpcError.Error(), "message", rpcError.ErrorData())
}
return fmt.Errorf("commitAndFinalize failed, bundle index: %d, err: %w", bundle.Index, err)
}
log.Info("commitAndFinalize in layer1", "with proof", withProof, "batch index", bundle.StartBatchIndex, "tx hash", txHash.String())
// Updating rollup status in database.
err = s.db.Transaction(func(dbTX *gorm.DB) error {
if err = s.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(s.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
return err
}
if err = s.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(s.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
return err
}
return nil
})
if err != nil {
log.Warn("failed to update rollup status of bundle and batches", "err", err)
return err
}
// Update the proving status when finalizing without proof, so that the coordinator can skip this task.
// This isn't a necessary step, so it is not done in the same transaction as UpdateFinalizeTxHashAndRollupStatus.
if !withProof {
txErr := s.db.Transaction(func(dbTX *gorm.DB) error {
if updateErr := s.bundleOrm.UpdateProvingStatus(s.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
return updateErr
}
if updateErr := s.batchOrm.UpdateProvingStatusByBundleHash(s.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
return updateErr
}
for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ {
tmpBatch, getErr := s.batchOrm.GetBatchByIndex(s.ctx, batchIndex)
if getErr != nil {
return getErr
}
if updateErr := s.chunkOrm.UpdateProvingStatusByBatchHash(s.ctx, tmpBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
return updateErr
}
}
return nil
})
if txErr != nil {
log.Error("Updating chunk and batch proving status when finalizing without proof failure", "bundleHash", bundle.Hash, "err", txErr)
}
}
return nil
}
func (s *Submitter) constructCommitAndFinalizeCalldataAndBlob(batch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, *kzg4844.Blob, error) {
// Create the FinalizeStruct tuple as an abi-compatible struct
finalizeStruct := struct {
BatchHeader []byte
TotalL1MessagesPoppedOverall *big.Int
PostStateRoot common.Hash
WithdrawRoot common.Hash
ZkProof []byte
}{
BatchHeader: batch.BatchHeader,
TotalL1MessagesPoppedOverall: new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore + endChunk.TotalL1MessagesPoppedInChunk),
PostStateRoot: common.HexToHash(batch.StateRoot),
WithdrawRoot: common.HexToHash(batch.WithdrawRoot),
}
if aggProof != nil {
finalizeStruct.ZkProof = aggProof.Proof()
}
calldata, err := s.l1RollupABI.Pack("commitAndFinalizeBatch", uint8(batch.CodecVersion), common.HexToHash(batch.ParentBatchHash), finalizeStruct)
if err != nil {
return nil, nil, fmt.Errorf("failed to pack commitAndFinalizeBatch: %w", err)
}
chunks, err := s.chunkOrm.GetChunksInRange(s.ctx, batch.StartChunkIndex, batch.EndChunkIndex)
if err != nil {
return nil, nil, fmt.Errorf("failed to get chunks in range for batch %d: %w", batch.Index, err)
}
if chunks[len(chunks)-1].Index != batch.EndChunkIndex {
return nil, nil, fmt.Errorf("unexpected last chunk index %d, expected %d", chunks[len(chunks)-1].Index, batch.EndChunkIndex)
}
var batchBlocks []*encoding.Block
for _, c := range chunks {
blocks, err := s.l2BlockOrm.GetL2BlocksInRange(s.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
return nil, nil, fmt.Errorf("failed to get blocks in range for batch %d: %w", batch.Index, err)
}
batchBlocks = append(batchBlocks, blocks...)
}
encodingBatch := &encoding.Batch{
Index: batch.Index,
ParentBatchHash: common.HexToHash(batch.ParentBatchHash),
PrevL1MessageQueueHash: common.HexToHash(batch.PrevL1MessageQueueHash),
PostL1MessageQueueHash: common.HexToHash(batch.PostL1MessageQueueHash),
Blocks: batchBlocks,
}
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(batch.CodecVersion))
if err != nil {
return nil, nil, fmt.Errorf("failed to get codec from version %d, err: %w", batch.CodecVersion, err)
}
daBatch, err := codec.NewDABatch(encodingBatch)
if err != nil {
return nil, nil, fmt.Errorf("failed to create DA batch: %w", err)
}
return calldata, daBatch.Blob(), nil
}
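// runSubmitterExample is an illustrative sketch of the expected operator flow (it is not
// called by the toolkit itself): construct the Submitter, then submit the single recovered
// bundle, optionally attaching the verified bundle proof. How ctx, db, cfg and chainCfg are
// wired together is assumed to come from the operator's own setup.
func runSubmitterExample(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, withProof bool) error {
submitter, err := NewSubmitter(ctx, db, cfg, chainCfg)
if err != nil {
return fmt.Errorf("failed to create submitter: %w", err)
}
// Submit commits and finalizes the recovered batch in a single commitAndFinalize call.
return submitter.Submit(withProof)
}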

View File

@@ -0,0 +1,476 @@
package relayer
import (
"context"
"fmt"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"github.com/scroll-tech/go-ethereum/rollup/l1"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/watcher"
"scroll-tech/rollup/internal/orm"
butils "scroll-tech/rollup/internal/utils"
)
type FullRecovery struct {
ctx context.Context
cfg *config.Config
genesis *core.Genesis
db *gorm.DB
blockORM *orm.L2Block
chunkORM *orm.Chunk
batchORM *orm.Batch
bundleORM *orm.Bundle
chunkProposer *watcher.ChunkProposer
batchProposer *watcher.BatchProposer
bundleProposer *watcher.BundleProposer
l2Watcher *watcher.L2WatcherClient
l1Client *ethclient.Client
l1Reader *l1.Reader
beaconNodeClient *blob_client.BeaconNodeClient
}
func NewFullRecovery(ctx context.Context, cfg *config.Config, genesis *core.Genesis, db *gorm.DB, chunkProposer *watcher.ChunkProposer, batchProposer *watcher.BatchProposer, bundleProposer *watcher.BundleProposer, l2Watcher *watcher.L2WatcherClient, l1Client *ethclient.Client, l1Reader *l1.Reader) (*FullRecovery, error) {
beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.L1Config.BeaconNodeEndpoint)
if err != nil {
return nil, fmt.Errorf("create blob client failed: %v", err)
}
return &FullRecovery{
ctx: ctx,
cfg: cfg,
genesis: genesis,
db: db,
blockORM: orm.NewL2Block(db),
chunkORM: orm.NewChunk(db),
batchORM: orm.NewBatch(db),
bundleORM: orm.NewBundle(db),
chunkProposer: chunkProposer,
batchProposer: batchProposer,
bundleProposer: bundleProposer,
l2Watcher: l2Watcher,
l1Client: l1Client,
l1Reader: l1Reader,
beaconNodeClient: beaconNodeClient,
}, nil
}
// RestoreFullPreviousState restores the full state from L1.
// The DB state should be clean: the latest batch in the DB should be finalized on L1. This function will
// restore all batches between the latest finalized batch in the DB and the latest finalized batch on L1.
func (f *FullRecovery) RestoreFullPreviousState() error {
log.Info("Restoring full previous state")
// 1. Get latest finalized batch stored in DB
latestDBBatch, err := f.batchORM.GetLatestBatch(f.ctx)
if err != nil {
return fmt.Errorf("failed to get latest batch from DB: %w", err)
}
log.Info("Latest finalized batch in DB", "batch", latestDBBatch.Index, "hash", latestDBBatch.Hash)
// 2. Get latest finalized L1 block
latestFinalizedL1Block, err := f.l1Reader.GetLatestFinalizedBlockNumber()
if err != nil {
return fmt.Errorf("failed to get latest finalized L1 block number: %w", err)
}
log.Info("Latest finalized L1 block number", "latest finalized L1 block", latestFinalizedL1Block)
// 3. Get latest finalized batch from contract (at latest finalized L1 block)
latestFinalizedBatchContract, err := f.l1Reader.LatestFinalizedBatchIndex(latestFinalizedL1Block)
if err != nil {
return fmt.Errorf("failed to get latest finalized batch: %w", err)
}
log.Info("Latest finalized batch from L1 contract", "latest finalized batch", latestFinalizedBatchContract, "at latest finalized L1 block", latestFinalizedL1Block)
// 4. Get batches one by one, from the latest batch stored in the DB to the latest finalized batch on L1.
var fromBlock uint64
if latestDBBatch.Index > 0 {
receipt, err := f.l1Client.TransactionReceipt(f.ctx, common.HexToHash(latestDBBatch.CommitTxHash))
if err != nil {
return fmt.Errorf("failed to get transaction receipt of latest DB batch finalization transaction: %w", err)
}
fromBlock = receipt.BlockNumber.Uint64()
} else {
fromBlock = f.cfg.L1Config.StartHeight
}
log.Info("Fetching rollup events from L1", "from block", fromBlock, "to block", latestFinalizedL1Block, "from batch", latestDBBatch.Index, "to batch", latestFinalizedBatchContract)
commitsHeapMap := common.NewHeapMap[uint64, *l1.CommitBatchEvent](func(event *l1.CommitBatchEvent) uint64 {
return event.BatchIndex().Uint64()
})
batchEventsHeap := common.NewHeap[*batchEvents]()
var bundles [][]*batchEvents
err = f.l1Reader.FetchRollupEventsInRangeWithCallback(fromBlock, latestFinalizedL1Block, func(event l1.RollupEvent) bool {
// We're only interested in batches that are newer than the latest finalized batch in the DB.
if event.BatchIndex().Uint64() <= latestDBBatch.Index {
return true
}
switch event.Type() {
case l1.CommitEventType:
commitEvent := event.(*l1.CommitBatchEvent)
commitsHeapMap.Push(commitEvent)
case l1.FinalizeEventType:
finalizeEvent := event.(*l1.FinalizeBatchEvent)
var bundle []*batchEvents
// With bundles, all committed batches up to and including this finalized batch are finalized in the same bundle.
for commitsHeapMap.Len() > 0 {
commitEvent := commitsHeapMap.Peek()
if commitEvent.BatchIndex().Uint64() > finalizeEvent.BatchIndex().Uint64() {
break
}
bEvents := newBatchEvents(commitEvent, finalizeEvent)
commitsHeapMap.Pop()
batchEventsHeap.Push(bEvents)
bundle = append(bundle, bEvents)
}
bundles = append(bundles, bundle)
// Stop fetching rollup events if we reached the latest finalized batch.
if finalizeEvent.BatchIndex().Uint64() >= latestFinalizedBatchContract {
return false
}
case l1.RevertEventV0Type:
// We ignore reverted batches.
commitsHeapMap.RemoveByKey(event.BatchIndex().Uint64())
case l1.RevertEventV7Type:
// We ignore reverted batches.
revertBatch, ok := event.(*l1.RevertBatchEventV7)
if !ok {
log.Error(fmt.Sprintf("unexpected type of revert event: %T, expected RevertEventV7Type", event))
return false
}
// delete all batches from revertBatch.StartBatchIndex (inclusive) to revertBatch.FinishBatchIndex (inclusive)
for i := revertBatch.StartBatchIndex().Uint64(); i <= revertBatch.FinishBatchIndex().Uint64(); i++ {
commitsHeapMap.RemoveByKey(i)
}
}
return true
})
if err != nil {
return fmt.Errorf("failed to fetch rollup events: %w", err)
}
// 5. Process all finalized batches: fetch L2 blocks and reproduce chunks and batches.
var batches []*batchEvents
for batchEventsHeap.Len() > 0 {
nextBatch := batchEventsHeap.Pop().Value()
batches = append(batches, nextBatch)
}
if err = f.processFinalizedBatches(batches); err != nil {
return fmt.Errorf("failed to process finalized batches: %w", err)
}
// 6. Create bundles if needed.
for _, bundle := range bundles {
var dbBatches []*orm.Batch
var lastBatchInBundle *orm.Batch
for _, batch := range bundle {
dbBatch, err := f.batchORM.GetBatchByIndex(f.ctx, batch.commit.BatchIndex().Uint64())
if err != nil {
return fmt.Errorf("failed to get batch by index for bundle generation: %w", err)
}
// Bundles are only supported for codec version 3 and above.
if encoding.CodecVersion(dbBatch.CodecVersion) < encoding.CodecV3 {
break
}
dbBatches = append(dbBatches, dbBatch)
lastBatchInBundle = dbBatch
}
if len(dbBatches) == 0 {
continue
}
err = f.db.Transaction(func(dbTX *gorm.DB) error {
newBundle, err := f.bundleORM.InsertBundle(f.ctx, dbBatches, encoding.CodecVersion(lastBatchInBundle.CodecVersion), dbTX)
if err != nil {
return fmt.Errorf("failed to insert bundle to DB: %w", err)
}
if err = f.batchORM.UpdateBundleHashInRange(f.ctx, newBundle.StartBatchIndex, newBundle.EndBatchIndex, newBundle.Hash, dbTX); err != nil {
return fmt.Errorf("failed to update bundle_hash %s for batches (%d to %d): %w", newBundle.Hash, newBundle.StartBatchIndex, newBundle.EndBatchIndex, err)
}
if err = f.bundleORM.UpdateFinalizeTxHashAndRollupStatus(f.ctx, newBundle.Hash, lastBatchInBundle.FinalizeTxHash, types.RollupFinalized, dbTX); err != nil {
return fmt.Errorf("failed to update finalize tx hash and rollup status for bundle %s: %w", newBundle.Hash, err)
}
if err = f.bundleORM.UpdateProvingStatus(f.ctx, newBundle.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update proving status for bundle %s: %w", newBundle.Hash, err)
}
log.Info("Inserted bundle", "hash", newBundle.Hash, "start batch index", newBundle.StartBatchIndex, "end batch index", newBundle.EndBatchIndex)
return nil
})
if err != nil {
return fmt.Errorf("failed to insert bundle in DB transaction: %w", err)
}
}
return nil
}
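// groupIntoBundlesExample is an illustrative sketch (not used by the recovery flow) of the
// event-pairing logic above, with plain batch indices instead of events: every committed
// batch up to each finalize event lands in the same bundle. For example,
// groupIntoBundlesExample([]uint64{5, 6, 7, 8, 9}, []uint64{6, 9}) returns [[5 6] [7 8 9]].
func groupIntoBundlesExample(commits, finals []uint64) [][]uint64 {
var bundles [][]uint64
i := 0
for _, finalized := range finals {
var bundle []uint64
for i < len(commits) && commits[i] <= finalized {
bundle = append(bundle, commits[i])
i++
}
bundles = append(bundles, bundle)
}
return bundles
}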
func (f *FullRecovery) processFinalizedBatches(batches []*batchEvents) error {
if len(batches) == 0 {
return fmt.Errorf("no finalized batches to process")
}
firstBatch := batches[0]
lastBatch := batches[len(batches)-1]
log.Info("Processing finalized batches", "first batch", firstBatch.commit.BatchIndex(), "hash", firstBatch.commit.BatchHash(), "last batch", lastBatch.commit.BatchIndex(), "hash", lastBatch.commit.BatchHash())
// Since CodecV7, a single transaction can carry multiple blobs and therefore emit multiple
// CommitBatch events, where each event corresponds to a blob containing block range data.
// To correctly process these events, we need to:
// 1. Parse the associated blob data to extract the block range for each event.
// 2. Track the parent batch hash for each processed CommitBatch event, to:
// - Validate the batch hash, since the parent batch hash is needed to calculate the batch hash.
// - Derive the index of the current batch within the transaction from the number of batch hashes tracked so far.
// In commitBatches and commitAndFinalizeBatch, the parent batch hash is passed in calldata,
// so we can use it to get the first batch's parent batch hash and derive the rest.
// The map below serves this purpose with:
// Key: commit transaction hash
// Value: batch hashes (in order) of the CommitBatch events already processed in the transaction;
// each entry is the parent batch hash of the next batch in the same transaction.
txBlobIndexMap := make(map[common.Hash][]common.Hash)
for _, b := range batches {
args, err := f.l1Reader.FetchCommitTxData(b.commit)
if err != nil {
return fmt.Errorf("failed to fetch commit tx data of batch %d, tx hash: %v, err: %w", b.commit.BatchIndex().Uint64(), b.commit.TxHash().Hex(), err)
}
// All batches we process here will be >= CodecV7 since that is the minimum codec version for permissionless batches.
if args.Version < 7 {
return fmt.Errorf("unsupported codec version: %v, batch index: %v, tx hash: %s", args.Version, b.commit.BatchIndex().Uint64(), b.commit.TxHash().Hex())
}
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(args.Version))
if err != nil {
return fmt.Errorf("unsupported codec version: %v, err: %w", args.Version, err)
}
// We append the batch hash to the slice for the current commit transaction after processing the batch.
// That means the current index of the batch within the transaction is len(txBlobIndexMap[b.commit.TxHash()]).
currentIndex := len(txBlobIndexMap[b.commit.TxHash()])
if currentIndex >= len(args.BlobHashes) {
return fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
b.commit.TxHash(), len(args.BlobHashes), currentIndex, b.commit.BatchIndex().Uint64())
}
blobVersionedHash := args.BlobHashes[currentIndex]
// validate the batch hash
var parentBatchHash common.Hash
if currentIndex == 0 {
parentBatchHash = args.ParentBatchHash
} else {
// here we need to subtract 1 from the current index to get the parent batch hash.
parentBatchHash = txBlobIndexMap[b.commit.TxHash()][currentIndex-1]
}
calculatedBatch, err := codec.NewDABatchFromParams(b.commit.BatchIndex().Uint64(), blobVersionedHash, parentBatchHash)
if err != nil {
return fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", b.commit.BatchIndex().Uint64(), err)
}
if calculatedBatch.Hash() != b.commit.BatchHash() {
return fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", b.commit.BatchIndex(), b.commit.BatchHash().String(), calculatedBatch.Hash().String())
}
txBlobIndexMap[b.commit.TxHash()] = append(txBlobIndexMap[b.commit.TxHash()], b.commit.BatchHash())
if err = f.insertBatchIntoDB(b, codec, blobVersionedHash); err != nil {
return fmt.Errorf("failed to insert batch into DB, batch index: %d, err: %w", b.commit.BatchIndex().Uint64(), err)
}
log.Info("Processed batch", "index", b.commit.BatchIndex(), "hash", b.commit.BatchHash(), "commit tx hash", b.commit.TxHash().Hex(), "finalize tx hash", b.finalize.TxHash().Hex(), "blob versioned hash", blobVersionedHash.String(), "parent batch hash", parentBatchHash.String())
}
return nil
}
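// parentHashChainExample is an illustrative sketch (not used by the recovery flow) of the
// per-transaction bookkeeping above, with hypothetical hashes: for a commit tx whose calldata
// carries parent hash p and whose blobs produce batch hashes h0, h1, h2, the derived parents
// are [p, h0, h1]. The real flow computes each batch hash via codec.NewDABatchFromParams.
func parentHashChainExample(calldataParentHash common.Hash, batchHashes []common.Hash) []common.Hash {
parents := make([]common.Hash, len(batchHashes))
var processed []common.Hash // mirrors txBlobIndexMap[tx hash]
for i, batchHash := range batchHashes {
if len(processed) == 0 {
parents[i] = calldataParentHash
} else {
parents[i] = processed[len(processed)-1]
}
processed = append(processed, batchHash)
}
return parents
}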
func (f *FullRecovery) insertBatchIntoDB(batch *batchEvents, codec encoding.Codec, blobVersionedHash common.Hash) error {
// 5.1 Fetch block time.
blockHeader, err := f.l1Reader.FetchBlockHeaderByNumber(batch.commit.BlockNumber())
if err != nil {
return fmt.Errorf("failed to fetch block header by number %d: %w", batch.commit.BlockNumber(), err)
}
// 5.2 Fetch blob data for batch.
daBlocks, err := f.getBatchBlockRangeFromBlob(codec, blobVersionedHash, blockHeader.Time)
if err != nil {
return fmt.Errorf("failed to get batch block range from blob %s: %w", blobVersionedHash.Hex(), err)
}
lastBlock := daBlocks[len(daBlocks)-1]
// 5.3. Fetch L2 blocks for the entire batch.
if err = f.l2Watcher.TryFetchRunningMissingBlocks(lastBlock.Number()); err != nil {
return fmt.Errorf("failed to fetch L2 blocks: %w", err)
}
// 5.4. Reproduce chunk. Since we don't know the internals of a batch we just create 1 chunk per batch.
start := daBlocks[0].Number()
end := lastBlock.Number()
// get last chunk from DB
lastChunk, err := f.chunkORM.GetLatestChunk(f.ctx)
if err != nil {
return fmt.Errorf("failed to get latest chunk from DB: %w", err)
}
blocks, err := f.blockORM.GetL2BlocksInRange(f.ctx, start, end)
if err != nil {
return fmt.Errorf("failed to get L2 blocks in range: %w", err)
}
log.Info("Reproducing chunk", "start block", start, "end block", end)
var chunk encoding.Chunk
chunk.Blocks = blocks
chunk.PrevL1MessageQueueHash = common.HexToHash(lastChunk.PostL1MessageQueueHash)
chunk.PostL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(chunk.PrevL1MessageQueueHash, blocks)
if err != nil {
return fmt.Errorf("failed to apply L1 messages from blocks: %w", err)
}
metrics, err := butils.CalculateChunkMetrics(&chunk, codec.Version())
if err != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", err)
}
var dbChunk *orm.Chunk
err = f.db.Transaction(func(dbTX *gorm.DB) error {
dbChunk, err = f.chunkORM.InsertChunk(f.ctx, &chunk, codec.Version(), *metrics, dbTX)
if err != nil {
return fmt.Errorf("failed to insert chunk to DB: %w", err)
}
if err := f.blockORM.UpdateChunkHashInRange(f.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
return fmt.Errorf("failed to update chunk_hash for l2_blocks (chunk hash: %s, start block: %d, end block: %d): %w", dbChunk.Hash, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, err)
}
if err = f.chunkORM.UpdateProvingStatus(f.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update proving status for chunk %s: %w", dbChunk.Hash, err)
}
log.Info("Inserted chunk", "index", dbChunk.Index, "hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber)
return nil
})
if err != nil {
return fmt.Errorf("failed to insert chunk in DB transaction: %w", err)
}
// 5.5. Reproduce batch.
dbParentBatch, err := f.batchORM.GetLatestBatch(f.ctx)
if err != nil || dbParentBatch == nil {
return fmt.Errorf("failed to get latest batch from DB: %w", err)
}
var encBatch encoding.Batch
encBatch.Index = dbParentBatch.Index + 1
encBatch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)
encBatch.TotalL1MessagePoppedBefore = dbChunk.TotalL1MessagesPoppedBefore
encBatch.PrevL1MessageQueueHash = chunk.PrevL1MessageQueueHash
encBatch.PostL1MessageQueueHash = chunk.PostL1MessageQueueHash
encBatch.Chunks = []*encoding.Chunk{&chunk}
encBatch.Blocks = blocks
batchMetrics, err := butils.CalculateBatchMetrics(&encBatch, codec.Version(), false)
if err != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", err)
}
err = f.db.Transaction(func(dbTX *gorm.DB) error {
dbBatch, err := f.batchORM.InsertBatch(f.ctx, &encBatch, codec.Version(), *batchMetrics, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch to DB: %w", err)
}
if err = f.chunkORM.UpdateBatchHashInRange(f.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, dbBatch.Hash, dbTX); err != nil {
return fmt.Errorf("failed to update batch_hash for chunks (batch hash: %s, start chunk: %d, end chunk: %d): %w", dbBatch.Hash, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, err)
}
if err = f.batchORM.UpdateProvingStatus(f.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); err != nil {
return fmt.Errorf("failed to update proving status for batch %s: %w", dbBatch.Hash, err)
}
if err = f.batchORM.UpdateRollupStatusCommitAndFinalizeTxHash(f.ctx, dbBatch.Hash, types.RollupFinalized, batch.commit.TxHash().Hex(), batch.finalize.TxHash().Hex(), dbTX); err != nil {
return fmt.Errorf("failed to update rollup status for batch %s: %w", dbBatch.Hash, err)
}
log.Info("Inserted batch", "index", dbBatch.Index, "hash", dbBatch.Hash, "start chunk", dbBatch.StartChunkIndex, "end chunk", dbBatch.EndChunkIndex)
return nil
})
if err != nil {
return fmt.Errorf("failed to insert batch in DB transaction: %w", err)
}
return nil
}
func (f *FullRecovery) getBatchBlockRangeFromBlob(codec encoding.Codec, blobVersionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
blob, err := f.beaconNodeClient.GetBlobByVersionedHashAndBlockTime(f.ctx, blobVersionedHash, l1BlockTime)
if err != nil {
return nil, fmt.Errorf("failed to get blob %s: %w", blobVersionedHash.Hex(), err)
}
if blob == nil {
return nil, fmt.Errorf("blob %s not found", blobVersionedHash.Hex())
}
blobPayload, err := codec.DecodeBlob(blob)
if err != nil {
return nil, fmt.Errorf("blob %s decode error: %w", blobVersionedHash.Hex(), err)
}
blocks := blobPayload.Blocks()
if len(blocks) == 0 {
return nil, fmt.Errorf("empty blocks in blob %s", blobVersionedHash.Hex())
}
return blocks, nil
}
type batchEvents struct {
commit *l1.CommitBatchEvent
finalize *l1.FinalizeBatchEvent
}
func newBatchEvents(commit *l1.CommitBatchEvent, finalize *l1.FinalizeBatchEvent) *batchEvents {
if commit.BatchIndex().Uint64() > finalize.BatchIndex().Uint64() {
panic(fmt.Sprintf("commit and finalize batch index mismatch: %d != %d", commit.BatchIndex().Uint64(), finalize.BatchIndex().Uint64()))
}
return &batchEvents{
commit: commit,
finalize: finalize,
}
}
func (e *batchEvents) CompareTo(other *batchEvents) int {
return e.commit.BatchIndex().Cmp(other.commit.BatchIndex())
}

View File

@@ -13,6 +13,8 @@ import (
"github.com/go-resty/resty/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/da-codec/encoding"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
@@ -20,7 +22,6 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
@@ -289,6 +290,12 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
log.Info("Validium importGenesis", "calldata", common.Bytes2Hex(calldata))
} else {
// rollup mode: pass batchHeader and stateRoot
// Check state root is not zero
if stateRoot == (common.Hash{}) {
return fmt.Errorf("state root is zero")
}
calldata, packErr = r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack rollup importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
@@ -501,6 +508,11 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Error("failed to construct normal payload", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
}
if err = r.sanityChecksCommitBatchCodecV7CalldataAndBlobs(calldata, blobs); err != nil {
log.Error("Sanity check failed for calldata and blobs", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
}
}
default:
log.Error("unsupported codec version in ProcessPendingBatches", "codecVersion", codecVersion, "start index", firstBatch, "end index", lastBatch.Index)
@@ -998,6 +1010,18 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
}
func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithChunks) ([]byte, uint64, uint64, error) {
// Check state root is not zero
stateRoot := common.HexToHash(batch.Batch.StateRoot)
if stateRoot == (common.Hash{}) {
return nil, 0, 0, fmt.Errorf("batch %d state root is zero", batch.Batch.Index)
}
// Check parent batch hash is not zero
parentBatchHash := common.HexToHash(batch.Batch.ParentBatchHash)
if parentBatchHash == (common.Hash{}) {
return nil, 0, 0, fmt.Errorf("batch %d parent batch hash is zero", batch.Batch.Index)
}
// Calculate metrics
var maxBlockHeight uint64
var totalGasUsed uint64
@@ -1017,6 +1041,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh
lastChunk := batch.Chunks[len(batch.Chunks)-1]
commitment := common.HexToHash(lastChunk.EndBlockHash)
version := encoding.CodecVersion(batch.Batch.CodecVersion)
calldata, err := r.validiumABI.Pack("commitBatch", version, common.HexToHash(batch.Batch.ParentBatchHash), common.HexToHash(batch.Batch.StateRoot), common.HexToHash(batch.Batch.WithdrawRoot), commitment[:])
if err != nil {
@@ -1027,6 +1052,12 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
// Check state root is not zero
stateRoot := common.HexToHash(dbBatch.StateRoot)
if stateRoot == (common.Hash{}) {
return nil, fmt.Errorf("batch %d state root is zero", dbBatch.Index)
}
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundlePostEuclidV2",

View File

@@ -0,0 +1,449 @@
package relayer
import (
"fmt"
"math/big"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"scroll-tech/rollup/internal/orm"
)
// sanityChecksCommitBatchCodecV7CalldataAndBlobs performs comprehensive validation of the constructed
// transaction data (calldata and blobs) by parsing them and comparing against database records.
// This ensures the constructed transaction data is correct and consistent with the database state.
func (r *Layer2Relayer) sanityChecksCommitBatchCodecV7CalldataAndBlobs(calldata []byte, blobs []*kzg4844.Blob) error {
calldataInfo, err := r.parseCommitBatchesCalldata(calldata)
if err != nil {
return fmt.Errorf("failed to parse calldata: %w", err)
}
batchesToValidate, err := r.getBatchesFromCalldata(calldataInfo)
if err != nil {
return fmt.Errorf("failed to get batches from database: %w", err)
}
if err := r.validateCalldataAndBlobsAgainstDatabase(calldataInfo, blobs, batchesToValidate); err != nil {
return fmt.Errorf("calldata and blobs validation failed: %w", err)
}
if err := r.validateDatabaseConsistency(batchesToValidate); err != nil {
return fmt.Errorf("database consistency validation failed: %w", err)
}
return nil
}
// CalldataInfo holds parsed information from commitBatches calldata
type CalldataInfo struct {
Version uint8
ParentBatchHash common.Hash
LastBatchHash common.Hash
}
// parseCommitBatchesCalldata parses the commitBatches calldata and extracts key information
func (r *Layer2Relayer) parseCommitBatchesCalldata(calldata []byte) (*CalldataInfo, error) {
method := r.l1RollupABI.Methods["commitBatches"]
if len(calldata) < 4 {
return nil, fmt.Errorf("calldata too short to contain a method selector: %d bytes", len(calldata))
}
decoded, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return nil, fmt.Errorf("failed to unpack commitBatches calldata: %w", err)
}
if len(decoded) != 3 {
return nil, fmt.Errorf("unexpected number of decoded parameters: got %d, want 3", len(decoded))
}
version, ok := decoded[0].(uint8)
if !ok {
return nil, fmt.Errorf("failed to type assert version to uint8")
}
parentBatchHashB, ok := decoded[1].([32]uint8)
if !ok {
return nil, fmt.Errorf("failed to type assert parentBatchHash to [32]uint8")
}
parentBatchHash := common.BytesToHash(parentBatchHashB[:])
lastBatchHashB, ok := decoded[2].([32]uint8)
if !ok {
return nil, fmt.Errorf("failed to type assert lastBatchHash to [32]uint8")
}
lastBatchHash := common.BytesToHash(lastBatchHashB[:])
return &CalldataInfo{
Version: version,
ParentBatchHash: parentBatchHash,
LastBatchHash: lastBatchHash,
}, nil
}
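// packCommitBatchesExample is an illustrative round-trip sketch (not used in production): it
// packs the same commitBatches(uint8, bytes32, bytes32) layout the parser above assumes and
// re-parses it, so packing version, parent and last batch hashes yields them back. common.Hash
// is a [32]byte, which the ABI packer accepts for bytes32 arguments.
func (r *Layer2Relayer) packCommitBatchesExample(version uint8, parentBatchHash, lastBatchHash common.Hash) (*CalldataInfo, error) {
calldata, err := r.l1RollupABI.Pack("commitBatches", version, parentBatchHash, lastBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to pack commitBatches: %w", err)
}
return r.parseCommitBatchesCalldata(calldata)
}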
// getBatchesFromCalldata retrieves the relevant batches from database based on calldata information
func (r *Layer2Relayer) getBatchesFromCalldata(info *CalldataInfo) ([]*dbBatchWithChunks, error) {
// Get the parent batch to determine the starting point
parentBatch, err := r.batchOrm.GetBatchByHash(r.ctx, info.ParentBatchHash.Hex())
if err != nil {
return nil, fmt.Errorf("failed to get parent batch by hash %s: %w", info.ParentBatchHash.Hex(), err)
}
// Get the last batch to determine the ending point
lastBatch, err := r.batchOrm.GetBatchByHash(r.ctx, info.LastBatchHash.Hex())
if err != nil {
return nil, fmt.Errorf("failed to get last batch by hash %s: %w", info.LastBatchHash.Hex(), err)
}
// Get all batches in the range (parent+1 to last)
firstBatchIndex := parentBatch.Index + 1
lastBatchIndex := lastBatch.Index
// Check if the range is valid
if firstBatchIndex > lastBatchIndex {
return nil, fmt.Errorf("no batches found in range: first index %d, last index %d", firstBatchIndex, lastBatchIndex)
}
var batchesToValidate []*dbBatchWithChunks
for batchIndex := firstBatchIndex; batchIndex <= lastBatchIndex; batchIndex++ {
dbBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
if err != nil {
return nil, fmt.Errorf("failed to get batch by index %d: %w", batchIndex, err)
}
// Get chunks for this batch
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
return nil, fmt.Errorf("failed to get chunks for batch %d: %w", batchIndex, err)
}
batchesToValidate = append(batchesToValidate, &dbBatchWithChunks{
Batch: dbBatch,
Chunks: dbChunks,
})
}
return batchesToValidate, nil
}
// validateDatabaseConsistency performs comprehensive validation of database records
func (r *Layer2Relayer) validateDatabaseConsistency(batchesToValidate []*dbBatchWithChunks) error {
if len(batchesToValidate) == 0 {
return fmt.Errorf("no batches to validate")
}
// Get previous chunk for continuity check
if len(batchesToValidate[0].Chunks) == 0 {
return fmt.Errorf("batch %d has no chunks", batchesToValidate[0].Batch.Index)
}
firstChunk := batchesToValidate[0].Chunks[0]
if firstChunk.Index == 0 {
return fmt.Errorf("genesis chunk should not be in normal batch submission flow, chunk index: %d", firstChunk.Index)
}
prevChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, firstChunk.Index-1)
if err != nil {
return fmt.Errorf("failed to get previous chunk %d for continuity check: %w", firstChunk.Index-1, err)
}
firstBatchCodecVersion := batchesToValidate[0].Batch.CodecVersion
for i, batch := range batchesToValidate {
// Validate codec version consistency
if batch.Batch.CodecVersion != firstBatchCodecVersion {
return fmt.Errorf("batch %d has different codec version %d, expected %d", batch.Batch.Index, batch.Batch.CodecVersion, firstBatchCodecVersion)
}
// Validate individual batch
if err := r.validateSingleBatchConsistency(batch, i, batchesToValidate); err != nil {
return err
}
// Validate chunks in this batch
if err := r.validateBatchChunksConsistency(batch, prevChunk); err != nil {
return err
}
// Update prevChunk to the last chunk of this batch for next iteration
if len(batch.Chunks) == 0 {
return fmt.Errorf("batch %d has no chunks", batch.Batch.Index)
}
prevChunk = batch.Chunks[len(batch.Chunks)-1]
}
return nil
}
// validateSingleBatchConsistency validates a single batch's consistency
func (r *Layer2Relayer) validateSingleBatchConsistency(batch *dbBatchWithChunks, i int, allBatches []*dbBatchWithChunks) error {
if batch == nil || batch.Batch == nil {
return fmt.Errorf("batch %d is nil", i)
}
if len(batch.Chunks) == 0 {
return fmt.Errorf("batch %d has no chunks", batch.Batch.Index)
}
// Validate essential batch fields
batchHash := common.HexToHash(batch.Batch.Hash)
if batchHash == (common.Hash{}) {
return fmt.Errorf("batch %d hash is zero", batch.Batch.Index)
}
if batch.Batch.Index == 0 {
return fmt.Errorf("batch %d has zero index (only genesis batch should have index 0)", i)
}
parentBatchHash := common.HexToHash(batch.Batch.ParentBatchHash)
if parentBatchHash == (common.Hash{}) {
return fmt.Errorf("batch %d parent batch hash is zero", batch.Batch.Index)
}
stateRoot := common.HexToHash(batch.Batch.StateRoot)
if stateRoot == (common.Hash{}) {
return fmt.Errorf("batch %d state root is zero", batch.Batch.Index)
}
// Check batch index continuity
if i > 0 {
prevBatch := allBatches[i-1]
if batch.Batch.Index != prevBatch.Batch.Index+1 {
return fmt.Errorf("batch index is not sequential: prev batch index %d, current batch index %d", prevBatch.Batch.Index, batch.Batch.Index)
}
if parentBatchHash != common.HexToHash(prevBatch.Batch.Hash) {
return fmt.Errorf("parent batch hash does not match previous batch hash: expected %s, got %s", prevBatch.Batch.Hash, batch.Batch.ParentBatchHash)
}
} else {
// For the first batch, verify continuity with parent batch from database
parentBatch, err := r.batchOrm.GetBatchByHash(r.ctx, batch.Batch.ParentBatchHash)
if err != nil {
return fmt.Errorf("failed to get parent batch %s for batch %d: %w", batch.Batch.ParentBatchHash, batch.Batch.Index, err)
}
if batch.Batch.Index != parentBatch.Index+1 {
return fmt.Errorf("first batch index is not sequential with parent: parent batch index %d, current batch index %d", parentBatch.Index, batch.Batch.Index)
}
}
// Validate L1 message queue consistency
if err := r.validateMessageQueueConsistency(batch.Batch.Index, batch.Chunks, common.HexToHash(batch.Batch.PrevL1MessageQueueHash), common.HexToHash(batch.Batch.PostL1MessageQueueHash)); err != nil {
return err
}
return nil
}
// validateBatchChunksConsistency validates chunks within a batch
func (r *Layer2Relayer) validateBatchChunksConsistency(batch *dbBatchWithChunks, prevChunk *orm.Chunk) error {
// Check codec version consistency between chunks and batch
for _, chunk := range batch.Chunks {
if chunk.CodecVersion != batch.Batch.CodecVersion {
return fmt.Errorf("batch %d chunk %d has different codec version %d, expected %d", batch.Batch.Index, chunk.Index, chunk.CodecVersion, batch.Batch.CodecVersion)
}
}
// Validate each chunk individually
currentPrevChunk := prevChunk
for j, chunk := range batch.Chunks {
if err := r.validateSingleChunkConsistency(chunk, currentPrevChunk); err != nil {
return fmt.Errorf("batch %d chunk %d: %w", batch.Batch.Index, j, err)
}
currentPrevChunk = chunk
}
return nil
}
// validateSingleChunkConsistency validates a single chunk
func (r *Layer2Relayer) validateSingleChunkConsistency(chunk *orm.Chunk, prevChunk *orm.Chunk) error {
if chunk == nil {
return fmt.Errorf("chunk is nil")
}
chunkHash := common.HexToHash(chunk.Hash)
if chunkHash == (common.Hash{}) {
return fmt.Errorf("chunk %d hash is zero", chunk.Index)
}
// Check chunk index continuity
if chunk.Index != prevChunk.Index+1 {
return fmt.Errorf("chunk index is not sequential: prev chunk index %d, current chunk index %d", prevChunk.Index, chunk.Index)
}
// Validate block range
if chunk.StartBlockNumber == 0 && chunk.EndBlockNumber == 0 {
return fmt.Errorf("chunk %d has zero block range", chunk.Index)
}
if chunk.StartBlockNumber > chunk.EndBlockNumber {
return fmt.Errorf("chunk %d has invalid block range: start %d > end %d", chunk.Index, chunk.StartBlockNumber, chunk.EndBlockNumber)
}
// Check hash fields
startBlockHash := common.HexToHash(chunk.StartBlockHash)
if startBlockHash == (common.Hash{}) {
return fmt.Errorf("chunk %d start block hash is zero", chunk.Index)
}
endBlockHash := common.HexToHash(chunk.EndBlockHash)
if endBlockHash == (common.Hash{}) {
return fmt.Errorf("chunk %d end block hash is zero", chunk.Index)
}
// Check block continuity with previous chunk
if prevChunk.EndBlockNumber+1 != chunk.StartBlockNumber {
return fmt.Errorf("chunk is not continuous with previous chunk %d: prev end block %d, current start block %d", prevChunk.Index, prevChunk.EndBlockNumber, chunk.StartBlockNumber)
}
// Check L1 messages continuity
expectedPoppedBefore := prevChunk.TotalL1MessagesPoppedBefore + prevChunk.TotalL1MessagesPoppedInChunk
if chunk.TotalL1MessagesPoppedBefore != expectedPoppedBefore {
return fmt.Errorf("L1 messages popped before is incorrect: expected %d, got %d", expectedPoppedBefore, chunk.TotalL1MessagesPoppedBefore)
}
return nil
}
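// chunkContinuityExample is an illustrative sketch (not used in production) of the continuity
// rules above with concrete numbers: a previous chunk ending at block 99 with 4 L1 messages
// popped before it and 2 popped inside it admits exactly one valid successor shape: index
// prev+1, start block 100, and 6 messages popped before.
func chunkContinuityExample(prevIndex, prevEndBlock, prevPoppedBefore, prevPoppedInChunk uint64) (nextIndex, nextStartBlock, nextPoppedBefore uint64) {
nextIndex = prevIndex + 1
nextStartBlock = prevEndBlock + 1
nextPoppedBefore = prevPoppedBefore + prevPoppedInChunk // e.g. 4 + 2 = 6
return nextIndex, nextStartBlock, nextPoppedBefore
}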
// validateCalldataAndBlobsAgainstDatabase validates calldata and blobs against database records
func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *CalldataInfo, blobs []*kzg4844.Blob, batchesToValidate []*dbBatchWithChunks) error {
// Validate blobs
if len(blobs) == 0 {
return fmt.Errorf("no blobs provided")
}
// Validate blob count
if len(blobs) != len(batchesToValidate) {
return fmt.Errorf("blob count mismatch: got %d blobs, expected %d batches", len(blobs), len(batchesToValidate))
}
// Get first and last batches for validation, length check is already done above
firstBatch := batchesToValidate[0].Batch
lastBatch := batchesToValidate[len(batchesToValidate)-1].Batch
// Validate codec version
if calldataInfo.Version != uint8(firstBatch.CodecVersion) {
return fmt.Errorf("version mismatch: calldata=%d, db=%d", calldataInfo.Version, firstBatch.CodecVersion)
}
// Validate parent batch hash
if calldataInfo.ParentBatchHash != common.HexToHash(firstBatch.ParentBatchHash) {
return fmt.Errorf("parentBatchHash mismatch: calldata=%s, db=%s", calldataInfo.ParentBatchHash.Hex(), firstBatch.ParentBatchHash)
}
// Validate last batch hash
if calldataInfo.LastBatchHash != common.HexToHash(lastBatch.Hash) {
return fmt.Errorf("lastBatchHash mismatch: calldata=%s, db=%s", calldataInfo.LastBatchHash.Hex(), lastBatch.Hash)
}
// Get codec for blob decoding
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(firstBatch.CodecVersion))
if err != nil {
return fmt.Errorf("failed to get codec: %w", err)
}
// Validate each blob against its corresponding batch
for i, blob := range blobs {
dbBatch := batchesToValidate[i].Batch
if err := r.validateSingleBlobAgainstBatch(blob, dbBatch, codec); err != nil {
return fmt.Errorf("blob validation failed for batch %d: %w", dbBatch.Index, err)
}
}
return nil
}
// validateSingleBlobAgainstBatch validates a single blob against its batch data
func (r *Layer2Relayer) validateSingleBlobAgainstBatch(blob *kzg4844.Blob, dbBatch *orm.Batch, codec encoding.Codec) error {
// Decode blob payload
payload, err := codec.DecodeBlob(blob)
if err != nil {
return fmt.Errorf("failed to decode blob: %w", err)
}
// Validate batch hash
daBatch, err := assembleDABatchFromPayload(payload, dbBatch, codec)
if err != nil {
return fmt.Errorf("failed to assemble batch from payload: %w", err)
}
if daBatch.Hash() != common.HexToHash(dbBatch.Hash) {
return fmt.Errorf("batch hash mismatch: decoded from blob=%s, db=%s", daBatch.Hash().Hex(), dbBatch.Hash)
}
return nil
}
// validateMessageQueueConsistency validates L1 message queue hash consistency
func (r *Layer2Relayer) validateMessageQueueConsistency(batchIndex uint64, chunks []*orm.Chunk, prevL1MsgQueueHash common.Hash, postL1MsgQueueHash common.Hash) error {
if len(chunks) == 0 {
return fmt.Errorf("batch %d has no chunks for message queue validation", batchIndex)
}
firstChunk := chunks[0]
lastChunk := chunks[len(chunks)-1]
// Calculate total L1 messages in this batch
var totalL1MessagesInBatch uint64
for _, chunk := range chunks {
totalL1MessagesInBatch += chunk.TotalL1MessagesPoppedInChunk
}
// If there were L1 messages processed before this batch, prev hash should not be zero
if firstChunk.TotalL1MessagesPoppedBefore > 0 && prevL1MsgQueueHash == (common.Hash{}) {
return fmt.Errorf("batch %d prev L1 message queue hash is zero but %d L1 messages were processed before", batchIndex, firstChunk.TotalL1MessagesPoppedBefore)
}
// If there are any L1 messages processed up to this batch, post hash should not be zero
totalL1MessagesProcessed := lastChunk.TotalL1MessagesPoppedBefore + lastChunk.TotalL1MessagesPoppedInChunk
if totalL1MessagesProcessed > 0 && postL1MsgQueueHash == (common.Hash{}) {
return fmt.Errorf("batch %d post L1 message queue hash is zero but %d L1 messages were processed in total", batchIndex, totalL1MessagesProcessed)
}
// Prev and post queue hashes should be different if L1 messages were processed in this batch
if totalL1MessagesInBatch > 0 && prevL1MsgQueueHash == postL1MsgQueueHash {
return fmt.Errorf("batch %d has same prev and post L1 message queue hashes but processed %d L1 messages in this batch", batchIndex, totalL1MessagesInBatch)
}
return nil
}
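// messageQueueHashRuleExample is an illustrative sketch (not used in production) of the last
// invariant above: a batch that pops L1 messages must change the rolling queue hash, so equal
// prev/post hashes are valid only when the batch pops zero messages.
func messageQueueHashRuleExample(prevHash, postHash common.Hash, poppedInBatch uint64) bool {
return poppedInBatch == 0 || prevHash != postHash
}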
func assembleDABatchFromPayload(payload encoding.DABlobPayload, dbBatch *orm.Batch, codec encoding.Codec) (encoding.DABatch, error) {
blocks, err := assembleBlocksFromPayload(payload)
if err != nil {
return nil, fmt.Errorf("failed to assemble blocks from payload batch_index=%d codec_version=%d parent_batch_hash=%s: %w", dbBatch.Index, dbBatch.CodecVersion, dbBatch.ParentBatchHash, err)
}
batch := &encoding.Batch{
Index: dbBatch.Index, // The database provides only batch index, other fields are derived from blob payload
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash), // The first batch's parent hash is verified with calldata, subsequent batches are linked via dbBatch.ParentBatchHash and verified in database consistency checks
PrevL1MessageQueueHash: payload.PrevL1MessageQueueHash(),
PostL1MessageQueueHash: payload.PostL1MessageQueueHash(),
Blocks: blocks,
Chunks: []*encoding.Chunk{ // One chunk for this batch to pass sanity checks when building DABatch
{
Blocks: blocks,
PrevL1MessageQueueHash: payload.PrevL1MessageQueueHash(),
PostL1MessageQueueHash: payload.PostL1MessageQueueHash(),
},
},
}
daBatch, err := codec.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to build DABatch batch_index=%d codec_version=%d parent_batch_hash=%s: %w", dbBatch.Index, dbBatch.CodecVersion, dbBatch.ParentBatchHash, err)
}
return daBatch, nil
}
func assembleBlocksFromPayload(payload encoding.DABlobPayload) ([]*encoding.Block, error) {
daBlocks := payload.Blocks()
txns := payload.Transactions()
if len(daBlocks) != len(txns) {
return nil, fmt.Errorf("mismatched number of blocks and transactions: %d blocks, %d transactions", len(daBlocks), len(txns))
}
blocks := make([]*encoding.Block, len(daBlocks))
for i := range daBlocks {
blocks[i] = &encoding.Block{
Header: &types.Header{
Number: new(big.Int).SetUint64(daBlocks[i].Number()),
Time: daBlocks[i].Timestamp(),
BaseFee: daBlocks[i].BaseFee(),
GasLimit: daBlocks[i].GasLimit(),
},
Transactions: encoding.TxsToTxsData(txns[i]),
}
}
return blocks, nil
}

View File

@@ -70,15 +70,18 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV7, rutils.ChunkMetrics{})
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
genesisBatch, err := batchOrm.GetBatchByIndex(context.Background(), 0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
ParentBatchHash: common.HexToHash(genesisBatch.Hash),
Chunks: []*encoding.Chunk{chunk1, chunk2},
Blocks: []*encoding.Block{block1, block2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV7, rutils.BatchMetrics{})
assert.NoError(t, err)

View File

@@ -81,6 +81,7 @@ func setupEnv(t *testing.T) {
block1 = &encoding.Block{}
err = json.Unmarshal(templateBlockTrace1, block1)
assert.NoError(t, err)
block1.Header.Number = big.NewInt(1)
chunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
codec, err := encoding.CodecFromVersion(encoding.CodecV0)
assert.NoError(t, err)
@@ -94,6 +95,7 @@ func setupEnv(t *testing.T) {
block2 = &encoding.Block{}
err = json.Unmarshal(templateBlockTrace2, block2)
assert.NoError(t, err)
block2.Header.Number = big.NewInt(2)
chunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
daChunk2, err := codec.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
assert.NoError(t, err)

View File

@@ -63,7 +63,7 @@ type FeeData struct {
gasLimit uint64
}
// Sender Transaction sender to send transaction to l1/l2 geth
// Sender is a transaction sender that sends transactions to l1/l2
type Sender struct {
config *config.SenderConfig
gethClient *gethclient.Client
@@ -105,13 +105,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
return nil, fmt.Errorf("failed to create transaction signer, err: %w", err)
}
// Set pending nonce
nonce, err := client.PendingNonceAt(ctx, transactionSigner.GetAddr())
if err != nil {
return nil, fmt.Errorf("failed to get pending nonce for address %s, err: %w", transactionSigner.GetAddr(), err)
}
transactionSigner.SetNonce(nonce)
// Create sender instance first and then initialize nonce
sender := &Sender{
ctx: ctx,
config: config,
@@ -127,8 +121,13 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
service: service,
senderType: senderType,
}
sender.metrics = initSenderMetrics(reg)
// Initialize the nonce using the new resetNonce method
if err := sender.resetNonce(); err != nil {
return nil, fmt.Errorf("failed to reset nonce: %w", err)
}
sender.metrics = initSenderMetrics(reg)
go sender.loop(ctx)
return sender, nil
@@ -242,7 +241,10 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
// Check whether the error message mentions the nonce, and reset the nonce if so.
// Only reset the nonce when the failure is not from a resubmission.
if strings.Contains(err.Error(), "nonce too low") {
s.resetNonce(context.Background())
if err := s.resetNonce(); err != nil {
log.Warn("failed to reset nonce after failed send transaction", "address", s.transactionSigner.GetAddr().String(), "err", err)
return common.Hash{}, 0, fmt.Errorf("failed to reset nonce after failed send transaction, err: %w", err)
}
}
return common.Hash{}, 0, fmt.Errorf("failed to send transaction, err: %w", err)
}
@@ -327,14 +329,46 @@ func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte,
return signedTx, nil
}
// resetNonce resets the nonce if sending a signed tx failed.
func (s *Sender) resetNonce(ctx context.Context) {
nonce, err := s.client.PendingNonceAt(ctx, s.transactionSigner.GetAddr())
// initializeNonce initializes the nonce by taking the maximum of (database nonce + 1) and the pending nonce.
func (s *Sender) initializeNonce() (uint64, error) {
// Get maximum nonce from database
dbNonce, err := s.pendingTransactionOrm.GetMaxNonceBySenderAddress(s.ctx, s.transactionSigner.GetAddr().Hex())
if err != nil {
log.Warn("failed to reset nonce", "address", s.transactionSigner.GetAddr().String(), "err", err)
return
return 0, fmt.Errorf("failed to get max nonce from database for address %s, err: %w", s.transactionSigner.GetAddr().Hex(), err)
}
// Get pending nonce from the client
pendingNonce, err := s.client.PendingNonceAt(s.ctx, s.transactionSigner.GetAddr())
if err != nil {
return 0, fmt.Errorf("failed to get pending nonce for address %s, err: %w", s.transactionSigner.GetAddr().Hex(), err)
}
// Take the maximum of pending nonce and (db nonce + 1)
// Database stores the used nonce, so the next available nonce should be dbNonce + 1
// When dbNonce is -1 (no records), dbNonce + 1 = 0, which is correct
nextDbNonce := uint64(dbNonce + 1)
var finalNonce uint64
if pendingNonce > nextDbNonce {
finalNonce = pendingNonce
} else {
finalNonce = nextDbNonce
}
log.Info("nonce initialization", "address", s.transactionSigner.GetAddr().Hex(), "maxDbNonce", dbNonce, "nextDbNonce", nextDbNonce, "pendingNonce", pendingNonce, "finalNonce", finalNonce)
return finalNonce, nil
}
// resetNonce resets the nonce if sending a signed tx failed.
func (s *Sender) resetNonce() error {
nonce, err := s.initializeNonce()
if err != nil {
log.Error("failed to reset nonce", "address", s.transactionSigner.GetAddr().String(), "err", err)
return fmt.Errorf("failed to reset nonce, err: %w", err)
}
log.Info("reset nonce", "address", s.transactionSigner.GetAddr().String(), "nonce", nonce)
s.transactionSigner.SetNonce(nonce)
return nil
}
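initializeNonce takes the maximum of (database nonce + 1) and the node's pending nonce, which covers both a lagging node and transactions sent outside this sender. The arithmetic in isolation, including the -1 "no rows" sentinel (standalone sketch):

func nextNonce(dbNonce int64, pendingNonce uint64) uint64 {
	next := uint64(dbNonce + 1) // dbNonce == -1 (no records) yields 0
	if pendingNonce > next {
		return pendingNonce
	}
	return next
}

// nextNonce(-1, 0)  == 0   fresh account, empty database
// nextNonce(7, 5)   == 8   node lags behind locally recorded transactions
// nextNonce(7, 12)  == 12  external transactions advanced the account nonce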
func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee, blobBaseFee uint64) (*gethTypes.Transaction, error) {
@@ -612,6 +646,16 @@ func (s *Sender) checkPendingTransaction() {
}
if err := s.client.SendTransaction(s.ctx, newSignedTx); err != nil {
if strings.Contains(err.Error(), "nonce too low") {
// When we receive a 'nonce too low' error but cannot find the transaction receipt, it indicates another transaction with this nonce has already been processed, so this transaction will never be mined and should be marked as failed.
log.Warn("nonce too low detected, marking all non-confirmed transactions with same nonce as failed", "nonce", originalTx.Nonce(), "address", s.transactionSigner.GetAddr().Hex(), "txHash", originalTx.Hash().Hex(), "newTxHash", newSignedTx.Hash().Hex(), "err", err)
txHashes := []string{originalTx.Hash().Hex(), newSignedTx.Hash().Hex()}
if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHashes(s.ctx, txHashes, types.TxStatusConfirmedFailed); updateErr != nil {
log.Error("failed to update transaction status", "hashes", txHashes, "err", updateErr)
return
}
return
}
// SendTransaction failed, need to rollback the previous database changes
if rollbackErr := s.db.Transaction(func(tx *gorm.DB) error {
// Restore original transaction status back to pending

View File

@@ -9,9 +9,10 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
@@ -97,7 +98,7 @@ func (p *BundleProposer) TryProposeBundle() {
}
}
func (p *BundleProposer) updateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error {
func (p *BundleProposer) UpdateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error {
if len(batches) == 0 {
return nil
}
@@ -187,7 +188,7 @@ func (p *BundleProposer) proposeBundle() error {
p.bundleFirstBlockTimeoutReached.Inc()
p.bundleBatchesNum.Set(float64(len(batches)))
return p.updateDBBundleInfo(batches, codecVersion)
return p.UpdateDBBundleInfo(batches, codecVersion)
}
currentTimeSec := uint64(time.Now().Unix())
@@ -201,7 +202,7 @@ func (p *BundleProposer) proposeBundle() error {
p.bundleFirstBlockTimeoutReached.Inc()
p.bundleBatchesNum.Set(float64(len(batches)))
return p.updateDBBundleInfo(batches, codecVersion)
return p.UpdateDBBundleInfo(batches, codecVersion)
}
log.Debug("pending batches are not enough and do not contain a timeout batch")

View File

@@ -142,7 +142,7 @@ func (p *ChunkProposer) SetReplayDB(replayDB *gorm.DB) {
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
p.chunkProposerCircleTotal.Inc()
if err := p.proposeChunk(); err != nil {
if err := p.ProposeChunk(); err != nil {
p.proposeChunkFailureTotal.Inc()
log.Error("propose new chunk failed", "err", err)
return
@@ -225,7 +225,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
return nil
}
func (p *ChunkProposer) proposeChunk() error {
func (p *ChunkProposer) ProposeChunk() error {
// unchunkedBlockHeight >= 1, assuming genesis batch with chunk 0, block 0 is committed.
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
if err != nil {
@@ -268,13 +268,9 @@ func (p *ChunkProposer) proposeChunk() error {
return fmt.Errorf("failed to get parent chunk: %w", err)
}
// Currently rollup-relayer only supports >= v7 codec version, it checks the minimum codec version after start.
// In EuclidV2 transition, empty PostL1MessageQueueHash will be naturally initialized to the first chunk's PrevL1MessageQueueHash.
chunk.PrevL1MessageQueueHash = common.HexToHash(parentChunk.PostL1MessageQueueHash)
// The previous chunk is not CodecV7, which means this is the first chunk of the fork.
if encoding.CodecVersion(parentChunk.CodecVersion) < codecVersion {
chunk.PrevL1MessageQueueHash = common.Hash{}
}
chunk.PostL1MessageQueueHash = chunk.PrevL1MessageQueueHash
var previousPostL1MessageQueueHash common.Hash

View File

@@ -59,12 +59,12 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
const blocksFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) error {
w.metrics.fetchRunningMissingBlocksTotal.Inc()
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
if err != nil {
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
return
return fmt.Errorf("failed to GetL2BlocksLatestHeight: %w", err)
}
// Fetch and store block traces for missing blocks
@@ -75,22 +75,24 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
to = blockHeight
}
if err = w.getAndStoreBlocks(w.ctx, from, to); err != nil {
if err = w.GetAndStoreBlocks(w.ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
return fmt.Errorf("fail to getAndStoreBlockTraces: %w", err)
}
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
w.metrics.rollupL2BlocksFetchedGap.Set(float64(blockHeight - to))
}
return nil
}
func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64) error {
func (w *L2WatcherClient) GetAndStoreBlocks(ctx context.Context, from, to uint64) error {
var blocks []*encoding.Block
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err := w.GetBlockByNumberOrHash(ctx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(number)))
block, err := w.BlockByNumber(ctx, new(big.Int).SetUint64(number))
if err != nil {
return fmt.Errorf("failed to GetBlockByNumberOrHash: %v. number: %v", err, number)
return fmt.Errorf("failed to BlockByNumber: %v. number: %v", err, number)
}
var count int

View File

@@ -5,12 +5,15 @@ import (
"encoding/json"
"errors"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
@@ -263,6 +266,19 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
return &batch, nil
}
// GetBatchByHash retrieves the batch by the given hash.
func (o *Batch) GetBatchByHash(ctx context.Context, hash string) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", hash)
var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByHash error: %w, batch hash: %v", err, hash)
}
return &batch, nil
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVersion encoding.CodecVersion, metrics rutils.BatchMetrics, dbTX ...*gorm.DB) (*Batch, error) {
if batch == nil {
@@ -338,6 +354,37 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
return &newBatch, nil
}
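// InsertPermissionlessBatch inserts a batch recovered via the permissionless-batches flow, recording it as already proven and finalized.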
func (o *Batch) InsertPermissionlessBatch(ctx context.Context, batchIndex *big.Int, batchHash common.Hash, codecVersion encoding.CodecVersion, chunk *Chunk) (*Batch, error) {
now := time.Now()
newBatch := &Batch{
Index: batchIndex.Uint64(),
Hash: batchHash.Hex(),
StartChunkIndex: chunk.Index,
StartChunkHash: chunk.Hash,
EndChunkIndex: chunk.Index,
EndChunkHash: chunk.Hash,
StateRoot: chunk.StateRoot,
PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash,
PostL1MessageQueueHash: chunk.PostL1MessageQueueHash,
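// NOTE: BatchHeader below is a placeholder; the batch is recorded as already proven and finalized, so presumably the real header is never re-derived from it.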
BatchHeader: []byte{1, 2, 3},
CodecVersion: int16(codecVersion),
EnableCompress: false,
ProvingStatus: int16(types.ProvingTaskVerified),
ProvedAt: &now,
RollupStatus: int16(types.RollupFinalized),
FinalizedAt: &now,
}
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
if err := db.Create(newBatch).Error; err != nil {
return nil, fmt.Errorf("Batch.InsertPermissionlessBatch error: %w", err)
}
return newBatch, nil
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
@@ -366,6 +413,29 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
return nil
}
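// UpdateRollupStatusCommitAndFinalizeTxHash updates a batch's rollup status together with its commit and finalize tx hashes and timestamps.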
func (o *Batch) UpdateRollupStatusCommitAndFinalizeTxHash(ctx context.Context, hash string, status types.RollupStatus, commitTxHash string, finalizeTxHash string, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["commit_tx_hash"] = commitTxHash
updateFields["committed_at"] = utils.NowUTC()
updateFields["finalize_tx_hash"] = finalizeTxHash
updateFields["finalized_at"] = utils.NowUTC()
updateFields["rollup_status"] = int(status)
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateRollupStatusCommitAndFinalizeTxHash error: %w, batch hash: %v, status: %v, commitTxHash: %v, finalizeTxHash: %v", err, hash, status.String(), commitTxHash, finalizeTxHash)
}
return nil
}
// UpdateRollupStatus updates the rollup status of a batch.
func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})

View File

@@ -59,8 +59,8 @@ func (*Bundle) TableName() string {
return "bundle"
}
// getLatestBundle retrieves the latest bundle from the database.
func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) {
// GetLatestBundle retrieves the latest bundle from the database.
func (o *Bundle) GetLatestBundle(ctx context.Context) (*Bundle, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Order("index desc")
@@ -70,7 +70,7 @@ func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("getLatestBundle error: %w", err)
return nil, fmt.Errorf("GetLatestBundle error: %w", err)
}
return &latestBundle, nil
}
@@ -106,7 +106,7 @@ func (o *Bundle) GetBundles(ctx context.Context, fields map[string]interface{},
// GetFirstUnbundledBatchIndex retrieves the first unbundled batch index.
func (o *Bundle) GetFirstUnbundledBatchIndex(ctx context.Context) (uint64, error) {
// Get the latest bundle
latestBundle, err := o.getLatestBundle(ctx)
latestBundle, err := o.GetLatestBundle(ctx)
if err != nil {
return 0, fmt.Errorf("Bundle.GetFirstUnbundledBatchIndex error: %w", err)
}
@@ -237,14 +237,18 @@ func (o *Bundle) UpdateProvingStatus(ctx context.Context, hash string, status ty
// UpdateRollupStatus updates the rollup status for a bundle.
// only used in unit tests.
func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error {
func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["rollup_status"] = int(status)
if status == types.RollupFinalized {
updateFields["finalized_at"] = utils.NowUTC()
}
db := o.db.WithContext(ctx)
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash", hash)

View File

@@ -7,9 +7,12 @@ import (
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
"scroll-tech/common/utils"
@@ -275,6 +278,48 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
return &newChunk, nil
}
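// InsertPermissionlessChunk inserts a chunk reconstructed from a decoded DA blob payload during permissionless-batch recovery.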
func (o *Chunk) InsertPermissionlessChunk(ctx context.Context, index uint64, codecVersion encoding.CodecVersion, daBlobPayload encoding.DABlobPayload, totalL1MessagePoppedBefore uint64, stateRoot common.Hash) (*Chunk, error) {
// Create a unique identifier for the chunk. It is only used as a key in the DB.
var chunkBytes []byte
for _, block := range daBlobPayload.Blocks() {
blockBytes := block.Encode()
chunkBytes = append(chunkBytes, blockBytes...)
}
hash := crypto.Keccak256Hash(chunkBytes)
numBlocks := len(daBlobPayload.Blocks())
emptyHash := common.Hash{}.Hex()
newChunk := &Chunk{
Index: index,
Hash: hash.Hex(),
StartBlockNumber: daBlobPayload.Blocks()[0].Number(),
StartBlockHash: emptyHash,
EndBlockNumber: daBlobPayload.Blocks()[numBlocks-1].Number(),
EndBlockHash: emptyHash,
StartBlockTime: daBlobPayload.Blocks()[0].Timestamp(),
TotalL1MessagesPoppedInChunk: 0, // this needs to be 0 so that the calculation of the total L1 messages popped before for the next chunk is correct
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
PrevL1MessageQueueHash: daBlobPayload.PrevL1MessageQueueHash().Hex(),
PostL1MessageQueueHash: daBlobPayload.PostL1MessageQueueHash().Hex(),
ParentChunkHash: emptyHash,
StateRoot: stateRoot.Hex(),
ParentChunkStateRoot: emptyHash,
WithdrawRoot: emptyHash,
CodecVersion: int16(codecVersion),
EnableCompress: false,
ProvingStatus: int16(types.ProvingTaskVerified),
}
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(newChunk).Error; err != nil {
return nil, fmt.Errorf("Chunk. InsertPermissionlessChunk error: %w, chunk hash: %v", err, newChunk.Hash)
}
return newChunk, nil
}
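Together with InsertPermissionlessBatch above, a recovery flow presumably inserts the reconstructed chunk first and then the batch that wraps it. A sketch under that assumption (all inputs illustrative):

// Hypothetical recovery flow; the real orchestration is not shown in this diff.
chunk, err := chunkOrm.InsertPermissionlessChunk(ctx, chunkIndex, codecVersion, payload, totalL1MessagePoppedBefore, stateRoot)
if err != nil {
	return err
}
if _, err := batchOrm.InsertPermissionlessBatch(ctx, batchIndex, batchHash, codecVersion, chunk); err != nil {
	return err
}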
// InsertTestChunkForProposerTool inserts a new chunk into the database only for analysis usage by proposer tool.
func (o *Chunk) InsertTestChunkForProposerTool(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, totalL1MessagePoppedBefore uint64, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {

View File

@@ -597,3 +597,61 @@ func TestPendingTransactionOrm(t *testing.T) {
err = pendingTransactionOrm.DeleteTransactionByTxHash(context.Background(), common.HexToHash("0x123"))
assert.Error(t, err) // Should return error for non-existent transaction
}
func TestPendingTransaction_GetMaxNonceBySenderAddress(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
// When there are no transactions for this sender address, it should return -1
maxNonce, err := pendingTransactionOrm.GetMaxNonceBySenderAddress(context.Background(), "0xdeadbeef")
assert.NoError(t, err)
assert.Equal(t, int64(-1), maxNonce)
// Insert two transactions with different nonces for the same sender address
senderMeta := &SenderMeta{
Name: "testName",
Service: "testService",
Address: common.HexToAddress("0xdeadbeef"),
Type: types.SenderTypeCommitBatch,
}
tx0 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
Nonce: 1,
To: &common.Address{},
Data: []byte{},
Gas: 21000,
AccessList: gethTypes.AccessList{},
Value: big.NewInt(0),
ChainID: big.NewInt(1),
GasTipCap: big.NewInt(0),
GasFeeCap: big.NewInt(1),
V: big.NewInt(0),
R: big.NewInt(0),
S: big.NewInt(0),
})
tx1 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
Nonce: 3,
To: &common.Address{},
Data: []byte{},
Gas: 22000,
AccessList: gethTypes.AccessList{},
Value: big.NewInt(0),
ChainID: big.NewInt(1),
GasTipCap: big.NewInt(1),
GasFeeCap: big.NewInt(2),
V: big.NewInt(0),
R: big.NewInt(0),
S: big.NewInt(0),
})
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx0, 0)
assert.NoError(t, err)
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx1, 0)
assert.NoError(t, err)
// Now the max nonce for this sender should be 3
maxNonce, err = pendingTransactionOrm.GetMaxNonceBySenderAddress(context.Background(), senderMeta.Address.String())
assert.NoError(t, err)
assert.Equal(t, int64(3), maxNonce)
}

View File

@@ -3,6 +3,7 @@ package orm
import (
"bytes"
"context"
"errors"
"fmt"
"time"
@@ -191,6 +192,25 @@ func (o *PendingTransaction) UpdateTransactionStatusByTxHash(ctx context.Context
return nil
}
// UpdateTransactionStatusByTxHashes updates the status of multiple transactions by their hashes in one SQL statement
func (o *PendingTransaction) UpdateTransactionStatusByTxHashes(ctx context.Context, txHashes []string, status types.TxStatus, dbTX ...*gorm.DB) error {
if len(txHashes) == 0 {
return nil
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&PendingTransaction{})
db = db.Where("hash IN ?", txHashes)
if err := db.Update("status", status).Error; err != nil {
return fmt.Errorf("failed to update transaction status for hashes %v to status %d: %w", txHashes, status, err)
}
return nil
}
// UpdateOtherTransactionsAsFailedByNonce updates the status of all transactions to TxStatusConfirmedFailed for a specific nonce and sender address, excluding a specified transaction hash.
func (o *PendingTransaction) UpdateOtherTransactionsAsFailedByNonce(ctx context.Context, senderAddress string, nonce uint64, hash common.Hash, dbTX ...*gorm.DB) error {
db := o.db
@@ -207,3 +227,27 @@ func (o *PendingTransaction) UpdateOtherTransactionsAsFailedByNonce(ctx context.
}
return nil
}
// GetMaxNonceBySenderAddress retrieves the maximum nonce for a specific sender address.
// Returns -1 if no transactions are found for the given address.
func (o *PendingTransaction) GetMaxNonceBySenderAddress(ctx context.Context, senderAddress string) (int64, error) {
var result struct {
Nonce int64 `gorm:"column:nonce"`
}
err := o.db.WithContext(ctx).
Model(&PendingTransaction{}).
Select("nonce").
Where("sender_address = ?", senderAddress).
Order("nonce DESC").
First(&result).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return -1, nil
}
return -1, fmt.Errorf("failed to get max nonce by sender address, address: %s, err: %w", senderAddress, err)
}
return result.Nonce, nil
}
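A minimal consumption sketch, mirroring how the sender's initializeNonce uses this accessor (the -1 sentinel maps cleanly to nonce 0):

dbNonce, err := pendingTransactionOrm.GetMaxNonceBySenderAddress(ctx, addr.Hex())
if err != nil {
	return err
}
next := uint64(dbNonce + 1) // -1 (no rows) yields 0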