Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)

Compare commits: refactor/z...darwin-sha (35 commits)
| SHA1 |
|---|
| 82f39aa9dc |
| 0c106636c2 |
| c87c35629d |
| 8f273f7a69 |
| 17b7e5caee |
| 07dc2c91bb |
| 768226c71f |
| 74c197e85a |
| f95eaec3ce |
| c34883e0aa |
| 8960d0f32d |
| 76a9d6fb20 |
| 8b779ff49a |
| d516949f39 |
| 3716c5a436 |
| 04a893a98f |
| 622ce7b0c9 |
| 2392d3458a |
| a2e8b6a644 |
| 3567f5b62a |
| 61a3af538d |
| c758aa6fa4 |
| abe66c6f7e |
| 7196c5cab6 |
| 380dbba61d |
| bc9bc5db36 |
| beee0c286c |
| 0b32509a55 |
| e679052df1 |
| 229809ad6f |
| fb43e87608 |
| 54adbb3e77 |
| 664c042a14 |
| 1a739cd5a7 |
| 1b6886bb49 |
Makefile (2 changes)
@@ -1,6 +1,6 @@
 .PHONY: fmt dev_docker build_test_docker run_test_docker clean update

-L2GETH_TAG=scroll-v5.3.0
+L2GETH_TAG=scroll-v5.5.1

 help: ## Display this help message
 	@grep -h \
@@ -8,10 +8,10 @@ require (
 github.com/go-redis/redis/v8 v8.11.5
 github.com/pressly/goose/v3 v3.16.0
 github.com/prometheus/client_golang v1.19.0
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
 github.com/stretchr/testify v1.9.0
 github.com/urfave/cli/v2 v2.25.7
-golang.org/x/sync v0.6.0
+golang.org/x/sync v0.7.0
 gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
 )
@@ -19,7 +19,7 @@ require (
 dario.cat/mergo v1.0.0 // indirect
 github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
 github.com/beorn7/perks v1.0.1 // indirect
-github.com/bits-and-blooms/bitset v1.12.0 // indirect
+github.com/bits-and-blooms/bitset v1.13.0 // indirect
 github.com/btcsuite/btcd v0.20.1-beta // indirect
 github.com/bytedance/sonic v1.10.1 // indirect
 github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -36,7 +36,7 @@ require (
 github.com/docker/docker v26.1.0+incompatible // indirect
 github.com/docker/go-connections v0.5.0 // indirect
 github.com/edsrzf/mmap-go v1.0.0 // indirect
-github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
+github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
 github.com/fjl/memsize v0.0.2 // indirect
 github.com/fsnotify/fsnotify v1.6.0 // indirect
 github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -57,7 +57,7 @@ require (
 github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
 github.com/holiman/uint256 v1.2.4 // indirect
 github.com/huin/goupnp v1.3.0 // indirect
-github.com/iden3/go-iden3-crypto v0.0.15 // indirect
+github.com/iden3/go-iden3-crypto v0.0.16 // indirect
 github.com/jackc/pgx/v5 v5.5.4 // indirect
 github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 github.com/jinzhu/inflection v1.0.0 // indirect
@@ -65,7 +65,6 @@ require (
 github.com/json-iterator/go v1.1.12 // indirect
 github.com/klauspost/compress v1.17.4 // indirect
 github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 github.com/kr/text v0.2.0 // indirect
 github.com/leodido/go-urn v1.2.4 // indirect
 github.com/mattn/go-colorable v0.1.13 // indirect
 github.com/mattn/go-isatty v0.0.20 // indirect
@@ -90,29 +89,28 @@ require (
 github.com/rjeczalik/notify v0.9.1 // indirect
 github.com/rs/cors v1.7.0 // indirect
 github.com/russross/blackfriday/v2 v2.1.0 // indirect
-github.com/scroll-tech/zktrie v0.8.2 // indirect
+github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923 // indirect
+github.com/scroll-tech/zktrie v0.8.4 // indirect
 github.com/sethvargo/go-retry v0.2.4 // indirect
 github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 github.com/status-im/keycard-go v0.2.0 // indirect
-github.com/supranational/blst v0.3.11 // indirect
+github.com/supranational/blst v0.3.12 // indirect
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
-github.com/tklauser/go-sysconf v0.3.12 // indirect
-github.com/tklauser/numcpus v0.6.1 // indirect
+github.com/tklauser/go-sysconf v0.3.14 // indirect
+github.com/tklauser/numcpus v0.8.0 // indirect
 github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
 github.com/tyler-smith/go-bip39 v1.1.0 // indirect
 github.com/ugorji/go/codec v1.2.11 // indirect
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
-github.com/yusufpapurcu/wmi v1.2.3 // indirect
+github.com/yusufpapurcu/wmi v1.2.4 // indirect
 go.opentelemetry.io/otel/trace v1.24.0 // indirect
 go.uber.org/multierr v1.11.0 // indirect
 golang.org/x/arch v0.5.0 // indirect
-golang.org/x/crypto v0.19.0 // indirect
-golang.org/x/mod v0.16.0 // indirect
-golang.org/x/net v0.20.0 // indirect
-golang.org/x/sys v0.17.0 // indirect
-golang.org/x/text v0.14.0 // indirect
+golang.org/x/crypto v0.24.0 // indirect
+golang.org/x/net v0.25.0 // indirect
+golang.org/x/sys v0.21.0 // indirect
+golang.org/x/text v0.16.0 // indirect
 golang.org/x/time v0.3.0 // indirect
-golang.org/x/tools v0.17.0 // indirect
 google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
 google.golang.org/protobuf v1.33.0 // indirect
 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
@@ -23,8 +23,8 @@ github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
-github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
+github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -61,7 +61,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0q
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
 github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -87,8 +86,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
 github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
 github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
-github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
-github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
+github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
+github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
 github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -176,8 +175,8 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
 github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
-github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
-github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
+github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
+github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
 github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
@@ -309,10 +308,12 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
-github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
-github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
+github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923 h1:QKgfS8G0btzg7nmFjSjllaxGkns3yg7g2/tG1nWExEI=
+github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
+github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
+github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
 github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
 github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
@@ -339,14 +340,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
-github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
+github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
-github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
-github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
-github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
-github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
+github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
+github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
+github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
 github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
 github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
 github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
@@ -369,8 +370,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dz
 github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
 github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
 github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU=
-github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
-github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
+github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
 go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
 go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
 go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
@@ -383,21 +384,21 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -414,20 +415,18 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
@@ -407,7 +407,7 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
 			return err
 		}
 	} else {
-		// The transactions are sorted, thus we set the score as their indices.
+		// The transactions are sorted, thus we set the score as their index.
 		for _, tx := range txs {
 			txBytes, err := json.Marshal(tx)
 			if err != nil {
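The comment fix above concerns the Redis sorted-set caching path: the transactions arrive already sorted, so each one's slice index is used as its ZSET score, and a later range read returns them in the original order. A minimal sketch of that pattern, assuming a go-redis v8 client (matching the go-redis/redis/v8 requirement in the go.mod above); the Transaction type, package name, and helper are hypothetical stand-ins, not the repository's actual code:

package txcache

import (
	"context"
	"encoding/json"

	"github.com/go-redis/redis/v8"
)

// Transaction is a hypothetical placeholder for the real history record.
type Transaction struct {
	Hash string `json:"hash"`
}

// cacheSortedTxs stores already-sorted transactions in a Redis sorted set,
// using each transaction's slice index as its score so that ZRANGE later
// returns the members in their original order.
func cacheSortedTxs(ctx context.Context, rdb *redis.Client, cacheKey string, txs []*Transaction) error {
	for i, tx := range txs {
		txBytes, err := json.Marshal(tx)
		if err != nil {
			return err
		}
		// Index as score: preserves the pre-sorted order inside the ZSET.
		if err := rdb.ZAdd(ctx, cacheKey, &redis.Z{Score: float64(i), Member: txBytes}).Err(); err != nil {
			return err
		}
	}
	return nil
}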
@@ -5,6 +5,7 @@ import (
 	"math/big"
 	"sort"

+	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/params"
 )

@@ -63,17 +64,6 @@ func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]
 	return forkHeights, forkHeightsMap, forkNameHeightMap
 }

-// BlocksUntilFork returns the number of blocks until the next fork
-// returns 0 if there is no fork scheduled for the future
-func BlocksUntilFork(blockHeight uint64, forkHeights []uint64) uint64 {
-	for _, forkHeight := range forkHeights {
-		if forkHeight > blockHeight {
-			return forkHeight - blockHeight
-		}
-	}
-	return 0
-}
-
 // BlockRange returns the block range of the hard fork
 // Need ensure the forkHeights is incremental
 func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64) {
@@ -87,3 +77,45 @@ func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64
 	}
 	return
 }
+
+// GetHardforkName returns the name of the hardfork active at the given block height and timestamp.
+// It checks the chain configuration to determine which hardfork is active.
+func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uint64) string {
+	if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
+		return "homestead"
+	} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
+		return "bernoulli"
+	} else if !config.IsDarwin(blockTimestamp) {
+		return "curie"
+	} else {
+		return "darwin"
+	}
+}
+
+// GetCodecVersion returns the encoding codec version for the given block height and timestamp.
+// It determines the appropriate codec version based on the active hardfork.
+func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uint64) encoding.CodecVersion {
+	if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
+		return encoding.CodecV0
+	} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
+		return encoding.CodecV1
+	} else if !config.IsDarwin(blockTimestamp) {
+		return encoding.CodecV2
+	} else {
+		return encoding.CodecV3
+	}
+}
+
+// GetMaxChunksPerBatch returns the maximum number of chunks allowed per batch for the given block height and timestamp.
+// This value may change depending on the active hardfork.
+func GetMaxChunksPerBatch(config *params.ChainConfig, blockHeight, blockTimestamp uint64) uint64 {
+	if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
+		return 15
+	} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
+		return 15
+	} else if !config.IsDarwin(blockTimestamp) {
+		return 45
+	} else {
+		return 45
+	}
+}
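The three helpers added above walk the same hardfork ladder (pre-Bernoulli, Bernoulli, Curie, Darwin), keyed on block height for the block-based forks and on timestamp for Darwin. A sketch (not from the repository) of how they compose, assuming the chain config exposes BernoulliBlock, CurieBlock, and DarwinTime fields in scroll-tech/go-ethereum's params.ChainConfig (field names and activation values assumed here for illustration):

package forks

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/params"
)

func ExampleHardforkHelpers() {
	darwinTime := uint64(1719000000) // hypothetical Darwin activation timestamp
	config := &params.ChainConfig{
		BernoulliBlock: big.NewInt(100), // assumed field name
		CurieBlock:     big.NewInt(200), // assumed field name
		DarwinTime:     &darwinTime,     // assumed field name
	}

	fmt.Println(GetHardforkName(config, 50, 0))                // homestead: before Bernoulli
	fmt.Println(GetCodecVersion(config, 150, 0))               // CodecV1: Bernoulli active, Curie not yet
	fmt.Println(GetMaxChunksPerBatch(config, 250, darwinTime)) // 45: Darwin active
}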
@@ -33,46 +33,6 @@ func TestCollectSortedForkBlocks(t *testing.T) {
 	}, n)
 }

-func TestBlocksUntilFork(t *testing.T) {
-	tests := map[string]struct {
-		block    uint64
-		forks    []uint64
-		expected uint64
-	}{
-		"NoFork": {
-			block:    44,
-			forks:    []uint64{},
-			expected: 0,
-		},
-		"BeforeFork": {
-			block:    0,
-			forks:    []uint64{1, 5},
-			expected: 1,
-		},
-		"OnFork": {
-			block:    1,
-			forks:    []uint64{1, 5},
-			expected: 4,
-		},
-		"OnLastFork": {
-			block:    5,
-			forks:    []uint64{1, 5},
-			expected: 0,
-		},
-		"AfterFork": {
-			block:    5,
-			forks:    []uint64{1, 5},
-			expected: 0,
-		},
-	}
-
-	for name, test := range tests {
-		t.Run(name, func(t *testing.T) {
-			require.Equal(t, test.expected, BlocksUntilFork(test.block, test.forks))
-		})
-	}
-}
-
 func TestBlockRange(t *testing.T) {
 	tests := []struct {
 		name string
@@ -4,7 +4,7 @@ go 1.21

 require (
 github.com/Masterminds/semver/v3 v3.2.1
-github.com/bits-and-blooms/bitset v1.12.0
+github.com/bits-and-blooms/bitset v1.13.0
 github.com/docker/docker v26.1.0+incompatible
 github.com/gin-contrib/pprof v1.4.0
 github.com/gin-gonic/gin v1.9.1
@@ -13,7 +13,8 @@ require (
 github.com/modern-go/reflect2 v1.0.2
 github.com/orcaman/concurrent-map v1.0.0
 github.com/prometheus/client_golang v1.19.0
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
+github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
 github.com/stretchr/testify v1.9.0
 github.com/testcontainers/testcontainers-go v0.30.0
 github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -77,7 +78,7 @@ require (
 github.com/docker/go-units v0.5.0 // indirect
 github.com/edsrzf/mmap-go v1.0.0 // indirect
 github.com/emicklei/go-restful/v3 v3.10.1 // indirect
-github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
+github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
 github.com/fjl/memsize v0.0.2 // indirect
 github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -119,7 +120,7 @@ require (
 github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
 github.com/holiman/uint256 v1.2.4 // indirect
 github.com/huin/goupnp v1.3.0 // indirect
-github.com/iden3/go-iden3-crypto v0.0.15 // indirect
+github.com/iden3/go-iden3-crypto v0.0.16 // indirect
 github.com/imdario/mergo v0.3.16 // indirect
 github.com/in-toto/in-toto-golang v0.5.0 // indirect
 github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -182,7 +183,7 @@ require (
 github.com/rjeczalik/notify v0.9.1 // indirect
 github.com/rs/cors v1.7.0 // indirect
 github.com/russross/blackfriday/v2 v2.1.0 // indirect
-github.com/scroll-tech/zktrie v0.8.2 // indirect
+github.com/scroll-tech/zktrie v0.8.4 // indirect
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
 github.com/shibumi/go-pathspec v1.3.0 // indirect
@@ -194,12 +195,12 @@ require (
 github.com/spf13/pflag v1.0.5 // indirect
 github.com/spf13/viper v1.4.0 // indirect
 github.com/status-im/keycard-go v0.2.0 // indirect
-github.com/supranational/blst v0.3.11 // indirect
+github.com/supranational/blst v0.3.12 // indirect
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 github.com/theupdateframework/notary v0.7.0 // indirect
 github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
-github.com/tklauser/go-sysconf v0.3.12 // indirect
-github.com/tklauser/numcpus v0.6.1 // indirect
+github.com/tklauser/go-sysconf v0.3.14 // indirect
+github.com/tklauser/numcpus v0.8.0 // indirect
 github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect
 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
@@ -210,7 +211,7 @@ require (
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
-github.com/yusufpapurcu/wmi v1.2.3 // indirect
+github.com/yusufpapurcu/wmi v1.2.4 // indirect
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
@@ -229,17 +230,17 @@ require (
 go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 go.uber.org/mock v0.4.0 // indirect
 golang.org/x/arch v0.5.0 // indirect
-golang.org/x/crypto v0.19.0 // indirect
+golang.org/x/crypto v0.24.0 // indirect
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
-golang.org/x/mod v0.16.0 // indirect
-golang.org/x/net v0.20.0 // indirect
+golang.org/x/mod v0.17.0 // indirect
+golang.org/x/net v0.25.0 // indirect
 golang.org/x/oauth2 v0.16.0 // indirect
-golang.org/x/sync v0.6.0 // indirect
-golang.org/x/sys v0.17.0 // indirect
-golang.org/x/term v0.17.0 // indirect
-golang.org/x/text v0.14.0 // indirect
+golang.org/x/sync v0.7.0 // indirect
+golang.org/x/sys v0.21.0 // indirect
+golang.org/x/term v0.21.0 // indirect
+golang.org/x/text v0.16.0 // indirect
 golang.org/x/time v0.3.0 // indirect
-golang.org/x/tools v0.17.0 // indirect
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
 google.golang.org/appengine v1.6.7 // indirect
 google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
 google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
@@ -70,8 +70,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
-github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
+github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
@@ -212,8 +212,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
 github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
-github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
-github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
+github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
+github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -384,8 +384,8 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
 github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
-github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
-github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
+github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
+github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
@@ -633,10 +633,12 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
-github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
-github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
+github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923 h1:QKgfS8G0btzg7nmFjSjllaxGkns3yg7g2/tG1nWExEI=
+github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
+github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
+github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
 github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
@@ -700,8 +702,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
-github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
+github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
@@ -714,10 +716,12 @@ github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4D
 github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
 github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
 github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g=
-github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
 github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
-github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
+github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
 github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
+github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw=
 github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI=
@@ -754,8 +758,9 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
 github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
+github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
 github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 h1:DZH5n7L3L8RxKdSyJHZt7WePgwdhHnPhQFdQSJaHF+o=
 github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w=
 github.com/zmap/zlint/v3 v3.5.0 h1:Eh2B5t6VKgVH0DFmTwOqE50POvyDhUaU9T2mJOe1vfQ=
@@ -815,8 +820,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
@@ -826,8 +831,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -846,8 +851,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
 golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
@@ -859,8 +864,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -901,21 +906,21 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -930,8 +935,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
common/libzkp/impl/Cargo.lock (generated, 286 changes)
@@ -39,9 +39,9 @@ dependencies = [
 "ctor",
 "encoder",
 "env_logger 0.10.0",
-"eth-types",
+"eth-types 0.11.0",
 "ethers-core",
-"gadgets",
+"gadgets 0.11.0",
 "halo2-base",
 "halo2-ecc",
 "halo2_proofs",
@@ -59,7 +59,41 @@ dependencies = [
 "snark-verifier-sdk",
 "strum 0.25.0",
 "strum_macros 0.25.3",
-"zkevm-circuits",
+"zkevm-circuits 0.11.0",
 ]

+[[package]]
+name = "aggregator"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"ark-std 0.3.0",
+"bitstream-io",
+"c-kzg",
+"ctor",
+"encoder",
+"env_logger 0.10.0",
+"eth-types 0.12.0",
+"ethers-core",
+"gadgets 0.12.0",
+"halo2-base",
+"halo2-ecc",
+"halo2_proofs",
+"hex",
+"itertools 0.11.0",
+"log",
+"num-bigint",
+"once_cell",
+"rand",
+"revm-precompile",
+"revm-primitives",
+"serde",
+"serde_json",
+"snark-verifier",
+"snark-verifier-sdk",
+"strum 0.25.0",
+"strum_macros 0.25.3",
+"zkevm-circuits 0.12.0",
+]
+
 [[package]]
@@ -539,18 +573,18 @@ name = "bus-mapping"
 version = "0.11.0"
 source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
 dependencies = [
-"eth-types",
+"eth-types 0.11.0",
 "ethers-core",
 "ethers-providers",
 "ethers-signers",
-"external-tracer",
-"gadgets",
+"external-tracer 0.11.0",
+"gadgets 0.11.0",
 "halo2_proofs",
 "hex",
 "itertools 0.11.0",
 "log",
-"mock",
-"mpt-zktrie",
+"mock 0.11.0",
+"mpt-zktrie 0.11.0",
 "num",
 "poseidon-circuit",
 "rand",
@@ -561,6 +595,31 @@ dependencies = [
 "strum_macros 0.25.3",
 ]

+[[package]]
+name = "bus-mapping"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"eth-types 0.12.0",
+"ethers-core",
+"ethers-providers",
+"ethers-signers",
+"gadgets 0.12.0",
+"halo2_proofs",
+"hex",
+"itertools 0.11.0",
+"log",
+"mock 0.12.0",
+"mpt-zktrie 0.12.0",
+"num",
+"poseidon-circuit",
+"revm-precompile",
+"serde",
+"serde_json",
+"strum 0.25.0",
+"strum_macros 0.25.3",
+]
+
 [[package]]
 name = "byte-slice-cast"
 version = "1.2.2"
@@ -1152,6 +1211,34 @@ dependencies = [
 "uint",
 ]

+[[package]]
+name = "eth-types"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"base64 0.13.1",
+"ethers-core",
+"ethers-signers",
+"halo2curves",
+"hex",
+"itertools 0.11.0",
+"log",
+"num",
+"num-bigint",
+"poseidon-base",
+"regex",
+"revm-precompile",
+"revm-primitives",
+"serde",
+"serde_json",
+"serde_with",
+"sha3 0.10.8",
+"strum 0.25.0",
+"strum_macros 0.25.3",
+"subtle",
+"uint",
+]
+
 [[package]]
 name = "ethabi"
 version = "18.0.0"
@@ -1285,8 +1372,21 @@ name = "external-tracer"
 version = "0.11.0"
 source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
 dependencies = [
-"eth-types",
-"geth-utils",
+"eth-types 0.11.0",
+"geth-utils 0.11.0",
+"log",
+"serde",
+"serde_json",
+"serde_stacker",
+]
+
+[[package]]
+name = "external-tracer"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"eth-types 0.12.0",
+"geth-utils 0.12.0",
 "log",
 "serde",
 "serde_json",
@@ -1467,7 +1567,19 @@ name = "gadgets"
 version = "0.11.0"
 source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
 dependencies = [
-"eth-types",
+"eth-types 0.11.0",
+"halo2_proofs",
+"poseidon-base",
+"sha3 0.10.8",
+"strum 0.25.0",
+]
+
+[[package]]
+name = "gadgets"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"eth-types 0.12.0",
 "halo2_proofs",
 "poseidon-base",
 "sha3 0.10.8",
@@ -1495,6 +1607,16 @@ dependencies = [
 "log",
 ]

+[[package]]
+name = "geth-utils"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"env_logger 0.10.0",
+"gobuild",
+"log",
+]
+
 [[package]]
 name = "getrandom"
 version = "0.2.10"
@@ -2239,10 +2361,25 @@ name = "mock"
 version = "0.11.0"
 source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
 dependencies = [
-"eth-types",
+"eth-types 0.11.0",
 "ethers-core",
 "ethers-signers",
-"external-tracer",
+"external-tracer 0.11.0",
+"itertools 0.11.0",
+"log",
+"rand",
+"rand_chacha",
+]
+
+[[package]]
+name = "mock"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"eth-types 0.12.0",
+"ethers-core",
+"ethers-signers",
+"external-tracer 0.12.0",
 "itertools 0.11.0",
 "log",
 "rand",
@@ -2254,7 +2391,21 @@ name = "mpt-zktrie"
 version = "0.11.0"
 source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
 dependencies = [
-"eth-types",
+"eth-types 0.11.0",
+"halo2curves",
+"hex",
+"log",
+"num-bigint",
+"poseidon-base",
+"zktrie",
+]
+
+[[package]]
+name = "mpt-zktrie"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"eth-types 0.12.0",
 "halo2curves",
 "hex",
 "log",
@@ -2726,14 +2877,14 @@ name = "prover"
 version = "0.11.0"
 source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
 dependencies = [
-"aggregator",
+"aggregator 0.11.0",
 "anyhow",
 "base64 0.13.1",
 "blake2",
-"bus-mapping",
+"bus-mapping 0.11.0",
 "chrono",
 "dotenvy",
-"eth-types",
+"eth-types 0.11.0",
 "ethers-core",
 "git-version",
 "halo2_proofs",
@@ -2741,7 +2892,7 @@ dependencies = [
 "itertools 0.11.0",
 "log",
 "log4rs",
-"mpt-zktrie",
+"mpt-zktrie 0.11.0",
 "num-bigint",
 "rand",
 "rand_xorshift",
@@ -2752,7 +2903,41 @@ dependencies = [
 "sha2",
 "snark-verifier",
 "snark-verifier-sdk",
-"zkevm-circuits",
+"zkevm-circuits 0.11.0",
 ]

+[[package]]
+name = "prover"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"aggregator 0.12.0",
+"anyhow",
+"base64 0.13.1",
+"blake2",
+"bus-mapping 0.12.0",
+"chrono",
+"dotenvy",
+"eth-types 0.12.0",
+"ethers-core",
+"git-version",
+"halo2_proofs",
+"hex",
+"itertools 0.11.0",
+"log",
+"log4rs",
+"mpt-zktrie 0.12.0",
+"num-bigint",
+"rand",
+"rand_xorshift",
+"serde",
+"serde_derive",
+"serde_json",
+"serde_stacker",
+"sha2",
+"snark-verifier",
+"snark-verifier-sdk",
+"zkevm-circuits 0.12.0",
+]
+
 [[package]]
@@ -2958,7 +3143,7 @@ dependencies = [
 [[package]]
 name = "revm-precompile"
 version = "7.0.0"
-source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#8543dd627348907773d8057807b6a310b276bb30"
+source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#36c304d9e9ba4e4b2d5468d91a6bd27210133b6a"
 dependencies = [
 "aurora-engine-modexp",
 "c-kzg",
@@ -2974,7 +3159,7 @@ dependencies = [
 [[package]]
 name = "revm-primitives"
 version = "4.0.0"
-source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#8543dd627348907773d8057807b6a310b276bb30"
+source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#36c304d9e9ba4e4b2d5468d91a6bd27210133b6a"
 dependencies = [
 "alloy-primitives",
 "auto_impl",
@@ -3525,7 +3710,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
 [[package]]
 name = "snark-verifier"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#fe1f8906041ad323034881fbd808908250d44829"
+source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#58c46b74c73156b9e09dc27617369d2acfb4461b"
 dependencies = [
 "bytes",
 "ethereum-types",
@@ -3548,7 +3733,7 @@ dependencies = [
 [[package]]
 name = "snark-verifier-sdk"
 version = "0.0.1"
-source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#fe1f8906041ad323034881fbd808908250d44829"
+source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#58c46b74c73156b9e09dc27617369d2acfb4461b"
 dependencies = [
 "bincode",
 "ethereum-types",
@@ -4364,14 +4549,14 @@ version = "0.11.0"
 source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
 dependencies = [
 "array-init",
-"bus-mapping",
+"bus-mapping 0.11.0",
 "either",
 "env_logger 0.10.0",
-"eth-types",
+"eth-types 0.11.0",
 "ethers-core",
 "ethers-signers",
 "ff",
-"gadgets",
+"gadgets 0.11.0",
 "halo2-base",
 "halo2-ecc",
 "halo2-mpt-circuits",
@@ -4381,8 +4566,50 @@ dependencies = [
 "itertools 0.11.0",
 "log",
 "misc-precompiled-circuit",
-"mock",
-"mpt-zktrie",
+"mock 0.11.0",
+"mpt-zktrie 0.11.0",
+"num",
+"num-bigint",
+"poseidon-circuit",
+"rand",
+"rand_chacha",
+"rand_xorshift",
+"rayon",
+"serde",
+"serde_json",
+"sha3 0.10.8",
+"snark-verifier",
+"snark-verifier-sdk",
+"strum 0.25.0",
+"strum_macros 0.25.3",
+"subtle",
+]
+
+[[package]]
+name = "zkevm-circuits"
+version = "0.12.0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.0-rc.3#706ba9ee7292b8e15a4fa0d4634a65dffa999402"
+dependencies = [
+"array-init",
+"bus-mapping 0.12.0",
+"either",
+"env_logger 0.10.0",
+"eth-types 0.12.0",
+"ethers-core",
+"ethers-signers",
+"ff",
+"gadgets 0.12.0",
+"halo2-base",
+"halo2-ecc",
+"halo2-mpt-circuits",
+"halo2_gadgets",
+"halo2_proofs",
+"hex",
+"itertools 0.11.0",
+"log",
+"misc-precompiled-circuit",
+"mock 0.12.0",
+"mpt-zktrie 0.12.0",
 "num",
 "num-bigint",
 "poseidon-circuit",
@@ -4410,7 +4637,8 @@ dependencies = [
 "libc",
 "log",
 "once_cell",
-"prover",
+"prover 0.11.0",
|
||||
"prover 0.12.0",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
|
||||
@@ -25,7 +25,11 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", default-features = false, features = ["parallel_syn", "scroll"] }

# curie
prover_v3 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
# darwin
prover_v4 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.0-rc.3", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }

base64 = "0.13.0"
env_logger = "0.9.0"
@@ -6,18 +6,20 @@ use crate::{
    },
};
use libc::c_char;
use prover::{
    aggregator::{Prover, Verifier},
use prover_v3::BatchProof as BatchProofV3;
use prover_v4::{
    aggregator::{Prover, Verifier as VerifierV4},
    check_chunk_hashes,
    consts::AGG_VK_FILENAME,
    consts::BATCH_VK_FILENAME,
    utils::{chunk_trace_to_witness_block, init_env_and_log},
    BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof,
    BatchHeader, BatchProof as BatchProofV4, BatchProvingTask, BlockTrace, BundleProof,
    BundleProvingTask, ChunkInfo, ChunkProof, MAX_AGG_SNARKS,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};

static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
static mut VERIFIER_V4: OnceCell<VerifierV4> = OnceCell::new();

/// # Safety
#[no_mangle]
@@ -31,8 +33,8 @@ pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir
    env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);

    // The VK file must exist here; inside the prover itself it is optional and only logged as a warning.
    if !file_exists(assets_dir, &AGG_VK_FILENAME) {
        panic!("{} must exist in folder {}", *AGG_VK_FILENAME, assets_dir);
    if !file_exists(assets_dir, &BATCH_VK_FILENAME) {
        panic!("{} must exist in folder {}", *BATCH_VK_FILENAME, assets_dir);
    }

    let prover = Prover::from_dirs(params_dir, assets_dir);
@@ -50,15 +52,26 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d

    // TODO: add a setting in scroll-prover.
    env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
    let verifier = Verifier::from_dirs(params_dir, assets_dir);
    let verifier_v4 = VerifierV4::from_dirs(params_dir, assets_dir);

    VERIFIER.set(verifier).unwrap();
    VERIFIER_V4.set(verifier_v4).unwrap();
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
    let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
    let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_batch_vk());

    vk_result
        .ok()
        .flatten()
        .map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_bundle_vk() -> *const c_char {
    let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_bundle_vk());

    vk_result
        .ok()
@@ -104,15 +117,19 @@ pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *con
pub unsafe extern "C" fn gen_batch_proof(
    chunk_hashes: *const c_char,
    chunk_proofs: *const c_char,
    batch_header: *const c_char,
) -> *const c_char {
    let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
        let chunk_hashes = c_char_to_vec(chunk_hashes);
        let chunk_proofs = c_char_to_vec(chunk_proofs);
        let batch_header = c_char_to_vec(batch_header);

        let chunk_hashes = serde_json::from_slice::<Vec<ChunkInfo>>(&chunk_hashes)
            .map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
        let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
            .map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
        let batch_header = serde_json::from_slice::<BatchHeader<MAX_AGG_SNARKS>>(&batch_header)
            .map_err(|e| format!("failed to deserialize batch header: {e:?}"))?;

        if chunk_hashes.len() != chunk_proofs.len() {
            return Err(format!("chunk hashes and chunk proofs lengths mismatch: chunk_hashes.len() = {}, chunk_proofs.len() = {}",
@@ -126,12 +143,13 @@ pub unsafe extern "C" fn gen_batch_proof(
        check_chunk_hashes("", &chunk_hashes_proofs).map_err(|e| format!("failed to check chunk info: {e:?}"))?;

        let batch = BatchProvingTask {
            chunk_proofs
            chunk_proofs,
            batch_header,
        };
        let proof = PROVER
            .get_mut()
            .expect("failed to get mutable reference to PROVER.")
            .gen_agg_evm_proof(batch, None, OUTPUT_DIR.as_deref())
            .gen_batch_proof(batch, None, OUTPUT_DIR.as_deref())
            .map_err(|e| format!("failed to generate proof: {e:?}"))?;

        serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
@@ -159,30 +177,76 @@ pub unsafe extern "C" fn verify_batch_proof(
    fork_name: *const c_char,
) -> c_char {
    let proof = c_char_to_vec(proof);
    let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
    let fork_name_str = c_char_to_str(fork_name);
    let fork_id = match fork_name_str {
        "bernoulli" => 2,
        "curie" => 3,
        "darwin" => 4,
        _ => {
            log::warn!("unexpected fork_name {fork_name_str}, treated as curie");
            3
            log::warn!("unexpected fork_name {fork_name_str}, treated as darwin");
            4
        }
    };
    let verified = panic_catch(|| {
        if fork_id == 2 {
            // before upgrade #3 (DA compression)
        if fork_id == 3 {
            // As of upgrade #3 (Curie), we verify batch proofs on-chain (EVM).
            let proof = serde_json::from_slice::<BatchProofV3>(proof.as_slice()).unwrap();
            verify_evm_calldata(
                include_bytes!("plonk_verifier_0.10.3.bin").to_vec(),
                include_bytes!("plonk_verifier_0.11.4.bin").to_vec(),
                proof.calldata(),
            )
        } else {
            VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
            // Post upgrade #4 (Darwin), batch proofs are not EVM-verifiable. Instead they are
            // halo2 proofs meant to be bundled recursively.
            let proof = serde_json::from_slice::<BatchProofV4>(proof.as_slice()).unwrap();
            VERIFIER_V4.get().unwrap().verify_batch_proof(&proof)
        }
    });
    verified.unwrap_or(false) as c_char
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_bundle_proof(batch_proofs: *const c_char) -> *const c_char {
    let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
        let batch_proofs = c_char_to_vec(batch_proofs);
        let batch_proofs = serde_json::from_slice::<Vec<BatchProofV4>>(&batch_proofs)
            .map_err(|e| format!("failed to deserialize batch proofs: {e:?}"))?;

        let bundle = BundleProvingTask { batch_proofs };
        let proof = PROVER
            .get_mut()
            .expect("failed to get mutable reference to PROVER.")
            .gen_bundle_proof(bundle, None, OUTPUT_DIR.as_deref())
            .map_err(|e| format!("failed to generate bundle proof: {e:?}"))?;

        serde_json::to_vec(&proof)
            .map_err(|e| format!("failed to serialize the bundle proof: {e:?}"))
    })
    .unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));

    let r = match proof_result {
        Ok(proof_bytes) => ProofResult {
            message: Some(proof_bytes),
            error: None,
        },
        Err(err) => ProofResult {
            message: None,
            error: Some(err),
        },
    };

    serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(proof: *const c_char) -> c_char {
    let proof = c_char_to_vec(proof);
    let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
    let verified = panic_catch(|| VERIFIER_V4.get().unwrap().verify_bundle_proof(proof));
    verified.unwrap_or(false) as c_char
}

// This function is only used for debugging on the Go side.
/// # Safety
#[no_mangle]
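The dispatch above keys on a fork name string passed across the FFI boundary. Below is a minimal Go sketch of the same mapping, assuming the fork names and numeric IDs used in the Rust match arms; the forkID helper is illustrative only and is not part of the repository.

package main

import "log"

// forkID mirrors the match in verify_batch_proof: Curie batch proofs are
// EVM-verified against the embedded PLONK verifier, while Darwin batch
// proofs are halo2 proofs checked by the v4 verifier for later bundling.
// Unknown fork names deliberately fall forward to Darwin, as in the Rust code.
func forkID(forkName string) int {
	switch forkName {
	case "curie":
		return 3
	case "darwin":
		return 4
	default:
		log.Printf("unexpected fork_name %s, treated as darwin", forkName)
		return 4
	}
}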
@@ -6,16 +6,18 @@ use crate::{
    },
};
use libc::c_char;
use prover::{
use prover_v3::{zkevm::Verifier as VerifierV3, ChunkProof as ChunkProofV3};
use prover_v4::{
    consts::CHUNK_VK_FILENAME,
    utils::init_env_and_log,
    zkevm::{Prover, Verifier},
    BlockTrace, ChunkProof, ChunkProvingTask,
    zkevm::{Prover, Verifier as VerifierV4},
    BlockTrace, ChunkProof as ChunkProofV4, ChunkProvingTask,
};
use std::{cell::OnceCell, env, ptr::null};

static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
static mut VERIFIER_V3: OnceCell<VerifierV3> = OnceCell::new();
static mut VERIFIER_V4: OnceCell<VerifierV4> = OnceCell::new();

/// # Safety
#[no_mangle]
@@ -48,9 +50,11 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d

    // TODO: add a setting in scroll-prover.
    env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
    let verifier = Verifier::from_dirs(params_dir, assets_dir);
    let verifier_v3 = VerifierV3::from_dirs(params_dir, assets_dir);
    let verifier_v4 = VerifierV4::from_dirs(params_dir, assets_dir);

    VERIFIER.set(verifier).unwrap();
    VERIFIER_V3.set(verifier_v3).unwrap();
    VERIFIER_V4.set(verifier_v4).unwrap();
}

/// # Safety
@@ -99,10 +103,29 @@ pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(proof: *const c_char) -> c_char {
pub unsafe extern "C" fn verify_chunk_proof(
    proof: *const c_char,
    fork_name: *const c_char,
) -> c_char {
    let proof = c_char_to_vec(proof);
    let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();

    let verified = panic_catch(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
    let fork_name_str = c_char_to_str(fork_name);
    let fork_id = match fork_name_str {
        "curie" => 3,
        "darwin" => 4,
        _ => {
            log::warn!("unexpected fork_name {fork_name_str}, treated as darwin");
            4
        }
    };
    let verified = panic_catch(|| {
        if fork_id == 3 {
            let proof = serde_json::from_slice::<ChunkProofV3>(proof.as_slice()).unwrap();
            VERIFIER_V3.get().unwrap().verify_chunk_proof(proof)
        } else {
            let proof = serde_json::from_slice::<ChunkProofV4>(proof.as_slice()).unwrap();
            VERIFIER_V4.get().unwrap().verify_chunk_proof(proof)
        }
    });
    verified.unwrap_or(false) as c_char
}
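Since verify_chunk_proof now takes a fork name, the Go caller has to thread that string through the FFI boundary. A minimal cgo sketch follows, assuming the libzkp.h header shown later in this diff is on the include path and that the library is linked; the package and wrapper names are hypothetical.

// Package zkpsketch is an illustrative cgo sketch, not the repo's actual wrapper.
package zkpsketch

/*
#include <stdlib.h>
#include "libzkp.h" // assumed include path for the header shown below
*/
import "C"
import "unsafe"

// VerifyChunkProof passes a JSON-encoded chunk proof plus the hard-fork name
// ("curie" or "darwin") down to the Rust verifier and reports the result.
func VerifyChunkProof(proofJSON []byte, forkName string) bool {
	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))
	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cFork))
	return C.verify_chunk_proof(cProof, cFork) != 0
}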
Binary file not shown.
BIN common/libzkp/impl/src/plonk_verifier_0.11.4.bin (new file; binary content not shown)
@@ -1,15 +1,26 @@
// BatchProver is used to:
// - Batch a list of chunk proofs
// - Bundle a list of batch proofs
void init_batch_prover(char* params_dir, char* assets_dir);
// BatchVerifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
void init_batch_verifier(char* params_dir, char* assets_dir);

char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs, char* batch_header);
char verify_batch_proof(char* proof, char* fork_name);

char* get_bundle_vk();
char* gen_bundle_proof(char* batch_proofs);
char verify_bundle_proof(char* proof);

void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
char verify_chunk_proof(char* proof, char* fork_name);

char* block_traces_to_chunk_info(char* block_traces);
void free_c_chars(char* ptr);
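The header above is the full C surface. As a hedged sketch (not the repository's actual Go wrapper), driving the new bundle entry points through cgo could look like the following; as in the earlier sketch, the package name, include path, and wrapper names are assumptions, while the C prototypes come straight from the header.

package zkpsketch

/*
#include <stdlib.h>
#include "libzkp.h" // assumed include path
*/
import "C"
import "unsafe"

// GenBundleProof feeds a JSON array of batch proofs to the prover and
// returns the serialized ProofResult JSON (or nil on internal failure).
func GenBundleProof(batchProofsJSON []byte) []byte {
	in := C.CString(string(batchProofsJSON))
	defer C.free(unsafe.Pointer(in))
	out := C.gen_bundle_proof(in)
	if out == nil {
		return nil
	}
	defer C.free_c_chars(out)
	return []byte(C.GoString(out))
}

// VerifyBundleProof reports whether the given bundle proof verifies.
func VerifyBundleProof(proofJSON []byte) bool {
	in := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(in))
	return C.verify_bundle_proof(in) != 0
}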
@@ -188,6 +188,31 @@ func (s ChunkProofsStatus) String() string {
	}
}

// BatchProofsStatus describes the proving status of batches that belong to a bundle.
type BatchProofsStatus int

const (
	// BatchProofsStatusUndefined represents an undefined batch proofs status
	BatchProofsStatusUndefined BatchProofsStatus = iota

	// BatchProofsStatusPending means that some batches that belong to this bundle have not been proven
	BatchProofsStatusPending

	// BatchProofsStatusReady means that all batches that belong to this bundle have been proven
	BatchProofsStatusReady
)

func (s BatchProofsStatus) String() string {
	switch s {
	case BatchProofsStatusPending:
		return "BatchProofsStatusPending"
	case BatchProofsStatusReady:
		return "BatchProofsStatusReady"
	default:
		return fmt.Sprintf("Undefined BatchProofsStatus (%d)", int32(s))
	}
}

// RollupStatus block_batch rollup_status (pending, committing, committed, commit_failed, finalizing, finalized, finalize_skipped, finalize_failed)
type RollupStatus int

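A small, self-contained usage sketch for the new status type follows; it re-declares the enum locally so it runs standalone, and the dispatch rule it prints is the intended reading of the constants above.

package main

import "fmt"

// Mirrors the BatchProofsStatus enum above (local copy for a runnable sketch).
type BatchProofsStatus int

const (
	BatchProofsStatusUndefined BatchProofsStatus = iota
	BatchProofsStatusPending
	BatchProofsStatusReady
)

func (s BatchProofsStatus) String() string {
	switch s {
	case BatchProofsStatusPending:
		return "BatchProofsStatusPending"
	case BatchProofsStatusReady:
		return "BatchProofsStatusReady"
	default:
		return fmt.Sprintf("Undefined BatchProofsStatus (%d)", int32(s))
	}
}

func main() {
	// A bundle task is dispatchable only once every batch under it is proven.
	for _, s := range []BatchProofsStatus{BatchProofsStatusPending, BatchProofsStatusReady} {
		fmt.Printf("%s -> dispatchable: %v\n", s, s == BatchProofsStatusReady)
	}
}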
@@ -1,91 +0,0 @@
package message

import (
	"crypto/ecdsa"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
	// Message fields
	Identity *Identity `json:"message"`
	// Prover signature
	Signature string `json:"signature"`
}

// Identity contains all the fields to be signed by the prover.
type Identity struct {
	// ProverName the prover name
	ProverName string `json:"prover_name"`
	// ProverVersion the prover version
	ProverVersion string `json:"prover_version"`
	// Challenge unique challenge generated by manager
	Challenge string `json:"challenge"`
	// HardForkName the hard fork name
	HardForkName string `json:"hard_fork_name"`
}

// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
	// Hash identity content
	hash, err := a.Identity.Hash()
	if err != nil {
		return err
	}

	// Sign register message
	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		return err
	}
	a.Signature = hexutil.Encode(sig)

	return nil
}

// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return false, err
	}
	sig := common.FromHex(a.Signature)

	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return false, err
	}
	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return "", err
	}
	sig := common.FromHex(a.Signature)
	// recover public key
	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return "", err
	}
	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
	byt, err := rlp.EncodeToBytes(i)
	if err != nil {
		return nil, err
	}
	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
}
@@ -1,89 +0,0 @@
package message

import (
	"crypto/ecdsa"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
	// Message fields
	Identity *LegacyIdentity `json:"message"`
	// Prover signature
	Signature string `json:"signature"`
}

// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
	// ProverName the prover name
	ProverName string `json:"prover_name"`
	// ProverVersion the prover version
	ProverVersion string `json:"prover_version"`
	// Challenge unique challenge generated by manager
	Challenge string `json:"challenge"`
}

// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
	// Hash identity content
	hash, err := a.Identity.Hash()
	if err != nil {
		return err
	}

	// Sign register message
	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		return err
	}
	a.Signature = hexutil.Encode(sig)

	return nil
}

// Verify verifies the message of auth.
func (a *LegacyAuthMsg) Verify() (bool, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return false, err
	}
	sig := common.FromHex(a.Signature)

	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return false, err
	}
	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey return public key from signature
func (a *LegacyAuthMsg) PublicKey() (string, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return "", err
	}
	sig := common.FromHex(a.Signature)
	// recover public key
	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return "", err
	}
	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
	byt, err := rlp.EncodeToBytes(i)
	if err != nil {
		return nil, err
	}
	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
}
@@ -1,28 +1,11 @@
package message

import (
	"crypto/ecdsa"
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"

	"github.com/scroll-tech/da-codec/encoding/codecv3"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// ProofFailureType the proof failure type
type ProofFailureType int

const (
	// ProofFailureUndefined the undefined type proof failure type
	ProofFailureUndefined ProofFailureType = iota
	// ProofFailurePanic proof failure for prover panic
	ProofFailurePanic
	// ProofFailureNoPanic proof failure for no prover panic
	ProofFailureNoPanic
)

// RespStatus represents status code from prover to scroll
@@ -35,7 +18,7 @@ const (
	StatusProofError
)

// ProofType represents the type of prover.
// ProofType represents the type of task.
type ProofType uint8

func (r ProofType) String() string {
@@ -44,6 +27,8 @@ func (r ProofType) String() string {
		return "proof type chunk"
	case ProofTypeBatch:
		return "proof type batch"
	case ProofTypeBundle:
		return "proof type bundle"
	default:
		return fmt.Sprintf("illegal proof type: %d", r)
	}
@@ -52,93 +37,14 @@ func (r ProofType) String() string {
const (
	// ProofTypeUndefined is an unknown proof type
	ProofTypeUndefined ProofType = iota
	// ProofTypeChunk is default prover, it only generates zk proof from traces.
	// ProofTypeChunk generates a proof for a ZkEvm chunk, where the inputs are the execution traces for blocks contained in the chunk. ProofTypeChunk is the default proof type.
	ProofTypeChunk
	// ProofTypeBatch generates zk proof from other zk proofs and aggregate them into one proof.
	// ProofTypeBatch generates zk proof from chunk proofs
	ProofTypeBatch
	// ProofTypeBundle generates zk proof from batch proofs
	ProofTypeBundle
)

// GenerateToken generates token
func GenerateToken() (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
	*ProofDetail `json:"zkProof"`
	// Prover signature
	Signature string `json:"signature"`

	// Prover public key
	publicKey string
}

// Sign signs the ProofMsg.
func (a *ProofMsg) Sign(priv *ecdsa.PrivateKey) error {
	hash, err := a.ProofDetail.Hash()
	if err != nil {
		return err
	}
	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		return err
	}
	a.Signature = hexutil.Encode(sig)
	return nil
}

// Verify verifies ProofMsg.Signature.
func (a *ProofMsg) Verify() (bool, error) {
	hash, err := a.ProofDetail.Hash()
	if err != nil {
		return false, err
	}
	sig := common.FromHex(a.Signature)
	// recover public key
	if a.publicKey == "" {
		pk, err := crypto.SigToPub(hash, sig)
		if err != nil {
			return false, err
		}
		a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
	}

	return crypto.VerifySignature(common.FromHex(a.publicKey), hash, sig[:len(sig)-1]), nil
}

// PublicKey return public key from signature
func (a *ProofMsg) PublicKey() (string, error) {
	if a.publicKey == "" {
		hash, err := a.ProofDetail.Hash()
		if err != nil {
			return "", err
		}
		sig := common.FromHex(a.Signature)
		// recover public key
		pk, err := crypto.SigToPub(hash, sig)
		if err != nil {
			return "", err
		}
		a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
		return a.publicKey, nil
	}

	return a.publicKey, nil
}

// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
	UUID            string           `json:"uuid"`
	ID              string           `json:"id"`
	Type            ProofType        `json:"type,omitempty"`
	BatchTaskDetail *BatchTaskDetail `json:"batch_task_detail,omitempty"`
	ChunkTaskDetail *ChunkTaskDetail `json:"chunk_task_detail,omitempty"`
}

// ChunkTaskDetail is a type containing ChunkTask detail.
type ChunkTaskDetail struct {
	BlockHashes []common.Hash `json:"block_hashes"`
@@ -146,30 +52,14 @@ type ChunkTaskDetail struct {

// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
	ChunkInfos  []*ChunkInfo  `json:"chunk_infos"`
	ChunkProofs []*ChunkProof `json:"chunk_proofs"`
	ChunkInfos  []*ChunkInfo     `json:"chunk_infos"`
	ChunkProofs []*ChunkProof    `json:"chunk_proofs"`
	BatchHeader *codecv3.DABatch `json:"batch_header"`
}

// ProofDetail is the message received from provers that contains zk proof, the status of
// the proof generation succeeded, and an error message if proof generation failed.
type ProofDetail struct {
	ID         string      `json:"id"`
	Type       ProofType   `json:"type,omitempty"`
	Status     RespStatus  `json:"status"`
	ChunkProof *ChunkProof `json:"chunk_proof,omitempty"`
	BatchProof *BatchProof `json:"batch_proof,omitempty"`
	Error      string      `json:"error,omitempty"`
}

// Hash return proofMsg content hash.
func (z *ProofDetail) Hash() ([]byte, error) {
	byt, err := rlp.EncodeToBytes(z)
	if err != nil {
		return nil, err
	}

	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
	BatchProofs []*BatchProof `json:"batch_proofs"`
}

// ChunkInfo is for calculating pi_hash for chunk
@@ -204,15 +94,16 @@ type ChunkProof struct {

// BatchProof includes the proof info that are required for batch verification and rollup.
type BatchProof struct {
	Protocol  []byte `json:"protocol"`
	Proof     []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk        []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	GitVersion string `json:"git_version,omitempty"`
	BatchHash  common.Hash `json:"batch_hash"`
	GitVersion string      `json:"git_version,omitempty"`
}

// SanityCheck checks whether an BatchProof is in a legal format
// TODO: change to check Proof&Instance when upgrading to snark verifier v0.4
// SanityCheck checks whether a BatchProof is in a legal format
func (ap *BatchProof) SanityCheck() error {
	if ap == nil {
		return errors.New("agg_proof is nil")
@@ -221,8 +112,51 @@ func (ap *BatchProof) SanityCheck() error {
	if len(ap.Proof) == 0 {
		return errors.New("proof not ready")
	}

	if len(ap.Proof)%32 != 0 {
		return fmt.Errorf("proof buffer has wrong length, expected: 32, got: %d", len(ap.Proof))
		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
	}

	if len(ap.Instances) == 0 {
		return errors.New("instance not ready")
	}

	if len(ap.Vk) == 0 {
		return errors.New("vk not ready")
	}

	return nil
}

// BundleProof includes the proof info that are required for verification of a bundle of batch proofs.
type BundleProof struct {
	Proof     []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk        []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	GitVersion string `json:"git_version,omitempty"`
}

// SanityCheck checks whether a BundleProof is in a legal format
func (ap *BundleProof) SanityCheck() error {
	if ap == nil {
		return errors.New("agg_proof is nil")
	}

	if len(ap.Proof) == 0 {
		return errors.New("proof not ready")
	}

	if len(ap.Proof)%32 != 0 {
		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
	}

	if len(ap.Instances) == 0 {
		return errors.New("instance not ready")
	}

	if len(ap.Vk) == 0 {
		return errors.New("vk not ready")
	}

	return nil
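To illustrate how a caller might gate on the new SanityCheck, here is a self-contained Go sketch; it mirrors the BundleProof shape above as a local copy, condenses the checks into one helper, and fabricates an (invalid) sample payload, so the check is expected to fail.

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the BundleProof shape above, for a runnable sketch.
type BundleProof struct {
	Proof      []byte `json:"proof"`
	Instances  []byte `json:"instances"`
	Vk         []byte `json:"vk"`
	GitVersion string `json:"git_version,omitempty"`
}

// SanityCheck condenses the field checks above into a single predicate.
func (ap *BundleProof) SanityCheck() error {
	if ap == nil || len(ap.Proof) == 0 || len(ap.Proof)%32 != 0 ||
		len(ap.Instances) == 0 || len(ap.Vk) == 0 {
		return fmt.Errorf("bundle proof not in a legal format")
	}
	return nil
}

func main() {
	// Fabricated payload: empty base64 fields decode to empty byte slices.
	raw := []byte(`{"proof":"", "instances":"", "vk":""}`)
	var p BundleProof
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	// Empty fields fail the sanity check, so this proof is rejected.
	fmt.Println("sanity:", p.SanityCheck())
}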
@@ -1,158 +0,0 @@
package message

import (
	"encoding/hex"
	"testing"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/stretchr/testify/assert"
)

func TestAuthMessageSignAndVerify(t *testing.T) {
	privkey, err := crypto.GenerateKey()
	assert.NoError(t, err)

	authMsg := &AuthMsg{
		Identity: &Identity{
			Challenge:     "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzgxNzUsIm9yaWdfaWF0IjoxNjkxMDM0NTc1fQ.HybBMsEJFhyZqtIa2iVcHUP7CEFttf708jmTMAImAWA",
			ProverName:    "test",
			ProverVersion: "v1.0.0",
		},
	}
	assert.NoError(t, authMsg.SignWithKey(privkey))

	// Check public key.
	pk, err := authMsg.PublicKey()
	assert.NoError(t, err)
	assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)

	ok, err := authMsg.Verify()
	assert.NoError(t, err)
	assert.Equal(t, true, ok)

	// Check public key is ok.
	pub, err := authMsg.PublicKey()
	assert.NoError(t, err)
	pubkey := crypto.CompressPubkey(&privkey.PublicKey)
	assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}

func TestGenerateToken(t *testing.T) {
	token, err := GenerateToken()
	assert.NoError(t, err)
	assert.Equal(t, 32, len(token))
}

func TestIdentityHash(t *testing.T) {
	identity := &Identity{
		Challenge:     "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzM0MTksIm9yaWdfaWF0IjoxNjkxMDI5ODE5fQ.EhkLZsj__rNPVC3ZDYBtvdh0nB8mmM_Hl82hObaIWOs",
		ProverName:    "test",
		ProverVersion: "v1.0.0",
	}

	hash, err := identity.Hash()
	assert.NoError(t, err)

	expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
	assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

func TestProofMessageSignVerifyPublicKey(t *testing.T) {
	privkey, err := crypto.GenerateKey()
	assert.NoError(t, err)

	proofMsg := &ProofMsg{
		ProofDetail: &ProofDetail{
			ID:     "testID",
			Type:   ProofTypeChunk,
			Status: StatusOk,
			ChunkProof: &ChunkProof{
				StorageTrace: []byte("testStorageTrace"),
				Protocol:     []byte("testProtocol"),
				Proof:        []byte("testProof"),
				Instances:    []byte("testInstance"),
				Vk:           []byte("testVk"),
				ChunkInfo:    nil,
			},
			Error: "testError",
		},
	}
	assert.NoError(t, proofMsg.Sign(privkey))

	// Test when publicKey is not set.
	ok, err := proofMsg.Verify()
	assert.NoError(t, err)
	assert.Equal(t, true, ok)

	// Test when publicKey is already set.
	ok, err = proofMsg.Verify()
	assert.NoError(t, err)
	assert.Equal(t, true, ok)
}

func TestProofDetailHash(t *testing.T) {
	proofDetail := &ProofDetail{
		ID:     "testID",
		Type:   ProofTypeChunk,
		Status: StatusOk,
		ChunkProof: &ChunkProof{
			StorageTrace: []byte("testStorageTrace"),
			Protocol:     []byte("testProtocol"),
			Proof:        []byte("testProof"),
			Instances:    []byte("testInstance"),
			Vk:           []byte("testVk"),
			ChunkInfo:    nil,
		},
		Error: "testError",
	}
	hash, err := proofDetail.Hash()
	assert.NoError(t, err)
	expectedHash := "01128ea9006601146ba80dbda959c96ebaefca463e78570e473a57d821db5ec1"
	assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

func TestProveTypeString(t *testing.T) {
	proofTypeChunk := ProofType(1)
	assert.Equal(t, "proof type chunk", proofTypeChunk.String())

	proofTypeBatch := ProofType(2)
	assert.Equal(t, "proof type batch", proofTypeBatch.String())

	illegalProof := ProofType(3)
	assert.Equal(t, "illegal proof type: 3", illegalProof.String())
}

func TestProofMsgPublicKey(t *testing.T) {
	privkey, err := crypto.GenerateKey()
	assert.NoError(t, err)

	proofMsg := &ProofMsg{
		ProofDetail: &ProofDetail{
			ID:     "testID",
			Type:   ProofTypeChunk,
			Status: StatusOk,
			ChunkProof: &ChunkProof{
				StorageTrace: []byte("testStorageTrace"),
				Protocol:     []byte("testProtocol"),
				Proof:        []byte("testProof"),
				Instances:    []byte("testInstance"),
				Vk:           []byte("testVk"),
				ChunkInfo:    nil,
			},
			Error: "testError",
		},
	}
	assert.NoError(t, proofMsg.Sign(privkey))

	// Test when publicKey is not set.
	pk, err := proofMsg.PublicKey()
	assert.NoError(t, err)
	assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)

	// Test when publicKey is already set.
	proofMsg.publicKey = common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey))
	pk, err = proofMsg.PublicKey()
	assert.NoError(t, err)
	assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
}
@@ -26,6 +26,9 @@ coordinator_api: libzkp ## Builds the Coordinator api instance.
coordinator_cron:
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron

coordinator_tool:
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool

coordinator_api_skip_libzkp:
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api

@@ -54,4 +57,4 @@ docker:

docker_push:
	docker push scrolltech/coordinator-api:${IMAGE_VERSION}
	docker push scrolltech/coordinator-cron:${IMAGE_VERSION}
	docker push scrolltech/coordinator-cron:${IMAGE_VERSION}
coordinator/cmd/tool/tool.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/common/database"
	"scroll-tech/common/types/message"
	"scroll-tech/common/utils"
	"scroll-tech/common/version"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/orm"
	coordinatorType "scroll-tech/coordinator/internal/types"
)

var app *cli.App

func init() {
	// Set up coordinator app info.
	app = cli.NewApp()
	app.Action = action
	app.Name = "coordinator-tool"
	app.Usage = "The Scroll L2 Coordinator"
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}
}

func action(ctx *cli.Context) error {
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	db, err := database.InitDB(cfg.DB)
	if err != nil {
		log.Crit("failed to init db connection", "err", err)
	}
	defer func() {
		if err = database.CloseDB(db); err != nil {
			log.Error("can not close db connection", "error", err)
		}
	}()

	batchOrm := orm.NewBatch(db)
	taskID := "fa9a290c8f1a46dc626fa67d626fadfe4803968ce776383996f3ae12504a2591"
	batches, err := batchOrm.GetBatchesByBundleHash(ctx.Context, taskID)
	if err != nil {
		log.Error("failed to get batch proofs for batch", "task_id", taskID, "error", err)
		return err
	}

	if len(batches) == 0 {
		log.Error("failed to get batch proofs for bundle, not found batch", "task_id", taskID)
		return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
	}

	var batchProofs []*message.BatchProof
	for _, batch := range batches {
		var proof message.BatchProof
		if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
			log.Error("failed to unmarshal proof")
			return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
		}
		batchProofs = append(batchProofs, &proof)
	}

	taskDetail := message.BundleTaskDetail{
		BatchProofs: batchProofs,
	}

	batchProofsBytes, err := json.Marshal(taskDetail)
	if err != nil {
		log.Error("failed to marshal proof")
		return fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", taskID, err)
	}

	taskMsg := &coordinatorType.GetTaskSchema{
		TaskID:   taskID,
		TaskType: int(message.ProofTypeBundle),
		TaskData: string(batchProofsBytes),
	}

	log.Info("task_msg", "data", taskMsg)
	return nil
}

func main() {
	// Run the coordinator tool.
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
@@ -2,6 +2,7 @@
	"prover_manager": {
		"provers_per_session": 1,
		"session_attempts": 5,
		"bundle_collection_time_sec": 180,
		"batch_collection_time_sec": 180,
		"chunk_collection_time_sec": 180,
		"verifier": {
@@ -24,7 +25,7 @@
	},
	"auth": {
		"secret": "prover secret key",
		"challenge_expire_duration_sec": 10,
		"challenge_expire_duration_sec": 3600,
		"login_expire_duration_sec": 3600
	}
}
@@ -7,7 +7,7 @@ require (
	github.com/gin-gonic/gin v1.9.1
	github.com/go-resty/resty/v2 v2.7.0
	github.com/mitchellh/mapstructure v1.5.0
	github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
	github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
	github.com/shopspring/decimal v1.3.1
	github.com/stretchr/testify v1.9.0
	github.com/urfave/cli/v2 v2.25.7
@@ -37,20 +37,20 @@ require (
	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
	github.com/ugorji/go/codec v1.2.11 // indirect
	golang.org/x/net v0.20.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/net v0.21.0 // indirect
	golang.org/x/text v0.16.0 // indirect
	google.golang.org/protobuf v1.33.0 // indirect
)

require (
	github.com/google/uuid v1.6.0
	github.com/prometheus/client_golang v1.19.0
	github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570
	github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923
)

require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bits-and-blooms/bitset v1.12.0 // indirect
	github.com/bits-and-blooms/bitset v1.13.0 // indirect
	github.com/btcsuite/btcd v0.20.1-beta // indirect
	github.com/chenzhuoyu/iasm v0.9.0 // indirect
	github.com/consensys/bavard v0.1.13 // indirect
@@ -58,27 +58,27 @@ require (
	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
	github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
	github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/go-stack/stack v1.8.1 // indirect
	github.com/holiman/uint256 v1.2.4 // indirect
	github.com/iden3/go-iden3-crypto v0.0.15 // indirect
	github.com/iden3/go-iden3-crypto v0.0.16 // indirect
	github.com/mmcloughlin/addchain v0.4.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_model v0.5.0 // indirect
	github.com/prometheus/common v0.48.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/scroll-tech/zktrie v0.8.2 // indirect
	github.com/scroll-tech/zktrie v0.8.4 // indirect
	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
	github.com/supranational/blst v0.3.11 // indirect
	github.com/tklauser/go-sysconf v0.3.12 // indirect
	github.com/tklauser/numcpus v0.6.1 // indirect
	github.com/supranational/blst v0.3.12 // indirect
	github.com/tklauser/go-sysconf v0.3.14 // indirect
	github.com/tklauser/numcpus v0.8.0 // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	github.com/yusufpapurcu/wmi v1.2.3 // indirect
	golang.org/x/crypto v0.19.0 // indirect
	golang.org/x/sync v0.6.0 // indirect
	golang.org/x/sys v0.17.0 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	golang.org/x/crypto v0.24.0 // indirect
	golang.org/x/sync v0.7.0 // indirect
	golang.org/x/sys v0.21.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	rsc.io/tmplfunc v0.0.3 // indirect
)
@@ -7,8 +7,8 @@ github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -43,8 +43,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
@@ -96,8 +96,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
@@ -173,12 +173,12 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570 h1:2oA2bAFPQXDZcUK8TA9qd5zj6AsURpHyBaAha5goP0c=
github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923 h1:QKgfS8G0btzg7nmFjSjllaxGkns3yg7g2/tG1nWExEI=
github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
@@ -196,8 +196,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
@@ -206,10 +206,10 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
@@ -221,8 +221,8 @@ github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6S
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
@@ -232,8 +232,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -243,13 +243,13 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -265,10 +265,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
@@ -277,8 +275,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
|
||||
@@ -21,6 +21,8 @@ type ProverManager struct {
BatchCollectionTimeSec int `json:"batch_collection_time_sec"`
// ChunkCollectionTimeSec chunk Proof collection time (in seconds).
ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"`
// BundleCollectionTimeSec bundle Proof collection time (in seconds).
BundleCollectionTimeSec int `json:"bundle_collection_time_sec"`
// Max number of workers in verifier worker pool
MaxVerifierWorkers int `json:"max_verifier_workers"`
// MinProverVersion is the minimum version of the prover that is required.

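Illustrative sketch (not part of this changeset): the new bundle_collection_time_sec field decodes alongside the existing timeouts via the json tags above. The standalone struct and the concrete values below are assumptions for illustration only.

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // ProverManagerConfig mirrors the json-tagged fields shown in the hunk above.
    type ProverManagerConfig struct {
    	BatchCollectionTimeSec  int `json:"batch_collection_time_sec"`
    	ChunkCollectionTimeSec  int `json:"chunk_collection_time_sec"`
    	BundleCollectionTimeSec int `json:"bundle_collection_time_sec"` // new in this diff
    	MaxVerifierWorkers      int `json:"max_verifier_workers"`
    }

    func main() {
    	// Illustrative values; real deployments choose their own timeouts.
    	raw := []byte(`{
    		"batch_collection_time_sec": 180,
    		"chunk_collection_time_sec": 120,
    		"bundle_collection_time_sec": 180,
    		"max_verifier_workers": 4
    	}`)
    	var cfg ProverManagerConfig
    	if err := json.Unmarshal(raw, &cfg); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%+v\n", cfg)
    }
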
@@ -1,15 +1,16 @@
package api

import (
"errors"
"fmt"

jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"gorm.io/gorm"

"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/auth"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"
)

@@ -19,9 +20,9 @@ type AuthController struct {
}

// NewAuthController returns a LoginController instance
func NewAuthController(db *gorm.DB) *AuthController {
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
return &AuthController{
loginLogic: auth.NewLoginLogic(db),
loginLogic: auth.NewLoginLogic(db, cfg, vf),
}
}

@@ -36,7 +37,11 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
// if not exist, the jwt token will intercept it
bearerToken := c.GetHeader("Authorization")
if bearerToken != "Bearer "+login.Message.Challenge {
return "", fmt.Errorf("check challenge failure for the not equal challenge string")
return "", errors.New("check challenge failure for the not equal challenge string")
}

if err := a.loginLogic.Check(&login); err != nil {
return "", fmt.Errorf("check the login parameter failure: %w", err)
}

// check the challenge is used, if used, return failure
@@ -53,44 +58,10 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
return jwt.MapClaims{}
}

var publicKey string
var err error
if v.Message.HardForkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
HardForkName: v.Message.HardForkName,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
}

if err != nil {
return jwt.MapClaims{}
}

if v.Message.HardForkName == "" {
v.Message.HardForkName = "shanghai"
}

return jwt.MapClaims{
types.PublicKey: publicKey,
types.PublicKey: v.PublicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.HardForkName: v.Message.HardForkName,
}
}

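Illustrative sketch (not part of this changeset): after this hunk, PayloadFunc no longer re-derives the public key from the signature; it copies the already-verified fields of the login parameter straight into the JWT claims. The key strings below are hypothetical stand-ins for the types.PublicKey, types.ProverName, types.ProverVersion, and types.HardForkName constants.

    package main

    import "fmt"

    // Hypothetical stand-ins for the types.* claim-key constants.
    const (
    	keyPublicKey     = "public_key"
    	keyProverName    = "prover_name"
    	keyProverVersion = "prover_version"
    	keyHardForkName  = "hard_fork_name"
    )

    func main() {
    	// Values a prover might present at login (illustrative only).
    	claims := map[string]interface{}{
    		keyPublicKey:     "0xabc123", // taken from the verified login parameter
    		keyProverName:    "prover-1",
    		keyProverVersion: "v4.4.0",
    		keyHardForkName:  "darwin",
    	}
    	fmt.Println(claims)
    }
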
@@ -108,9 +79,5 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
if proverVersion, ok := claims[types.ProverVersion]; ok {
c.Set(types.ProverVersion, proverVersion)
}

if hardForkName, ok := claims[types.HardForkName]; ok {
c.Set(types.HardForkName, hardForkName)
}
return nil
}

@@ -28,7 +28,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D

log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)

Auth = NewAuthController(db)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)
}

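Illustrative sketch (not part of this changeset): once IdentityHandler stashes the hard fork name in the gin context, downstream handlers can read it back. This assumes types.HardForkName is a plain string key, as the c.Set calls above suggest; the key name and route are hypothetical.

    package main

    import (
    	"net/http"

    	"github.com/gin-gonic/gin"
    )

    const hardForkNameKey = "hard_fork_name" // stand-in for types.HardForkName

    func main() {
    	r := gin.New()
    	// Middleware emulating what IdentityHandler does after JWT validation.
    	r.Use(func(c *gin.Context) {
    		c.Set(hardForkNameKey, "darwin") // illustrative value
    		c.Next()
    	})
    	r.GET("/whoami", func(c *gin.Context) {
    		// GetString returns "" if the key was never set.
    		c.String(http.StatusOK, "fork: %s", c.GetString(hardForkNameKey))
    	})
    	_ = r.Run(":8080") // error ignored for brevity in this sketch
    }
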
@@ -16,7 +16,6 @@ import (

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)

@@ -28,9 +27,10 @@ type GetTaskController struct {
}

// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg)

ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -42,7 +42,7 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *

ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
ptc.proverTasks[message.ProofTypeBatch] = batchProverTask

ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask
return ptc
}

@@ -106,18 +106,25 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
}

func (ptc *GetTaskController) proofType(para *coordinatorType.GetTaskParameter) message.ProofType {
proofType := message.ProofType(para.TaskType)

proofTypes := []message.ProofType{
message.ProofTypeChunk,
message.ProofTypeBatch,
var proofTypes []message.ProofType
if para.TaskType != 0 {
proofTypes = append(proofTypes, message.ProofType(para.TaskType))
}

if proofType == message.ProofTypeUndefined {
rand.Shuffle(len(proofTypes), func(i, j int) {
proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i]
})
proofType = proofTypes[0]
for _, proofType := range para.TaskTypes {
proofTypes = append(proofTypes, message.ProofType(proofType))
}
return proofType

if len(proofTypes) == 0 {
proofTypes = []message.ProofType{
message.ProofTypeChunk,
message.ProofTypeBatch,
message.ProofTypeBundle,
}
}

rand.Shuffle(len(proofTypes), func(i, j int) {
proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i]
})
return proofTypes[0]
}

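The rewritten proofType keeps backward compatibility: a legacy scalar task_type is honored, an explicit task_types list is appended, and an empty request falls back to all three proof types before a random pick. A self-contained sketch of the same selection logic follows; the simplified ProofType and the assumption that chunk/batch/bundle map to 1/2/3 are illustrative, not taken from the diff.

    package main

    import (
    	"fmt"
    	"math/rand"
    )

    type ProofType int

    const (
    	ProofTypeUndefined ProofType = iota
    	ProofTypeChunk
    	ProofTypeBatch
    	ProofTypeBundle
    )

    // pick mirrors the selection above: explicit types win, otherwise all types compete.
    func pick(taskType int, taskTypes []int) ProofType {
    	var candidates []ProofType
    	if taskType != 0 {
    		candidates = append(candidates, ProofType(taskType))
    	}
    	for _, t := range taskTypes {
    		candidates = append(candidates, ProofType(t))
    	}
    	if len(candidates) == 0 {
    		candidates = []ProofType{ProofTypeChunk, ProofTypeBatch, ProofTypeBundle}
    	}
    	rand.Shuffle(len(candidates), func(i, j int) {
    		candidates[i], candidates[j] = candidates[j], candidates[i]
    	})
    	return candidates[0]
    }

    func main() {
    	fmt.Println(pick(0, nil))         // random among chunk/batch/bundle
    	fmt.Println(pick(0, []int{3}))    // always bundle
    	fmt.Println(pick(1, []int{2, 3})) // random among all three
    }
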
@@ -1,15 +1,14 @@
package api

import (
"encoding/json"
"fmt"

"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/submitproof"
@@ -23,9 +22,9 @@ type SubmitProofController struct {
}

// NewSubmitProofController create the submit proof api controller instance
func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController {
func NewSubmitProofController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController {
return &SubmitProofController{
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, vf, reg),
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, chainCfg, db, vf, reg),
}
}

@@ -38,36 +37,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
return
}

proofMsg := message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: spp.TaskID,
Type: message.ProofType(spp.TaskType),
Status: message.RespStatus(spp.Status),
},
}

if spp.Status == int(message.StatusOk) {
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
proofMsg.BatchProof = &tmpBatchProof
}
}

if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, spp); err != nil {
nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr)
return

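Illustrative sketch (not part of this changeset): the controller now forwards the raw submission, and the per-task-type proof decoding removed above presumably moves into HandleZkProof, which is not shown in this hunk. The helper below re-creates that deleted switch in generic form; ChunkProof, BatchProof, and the numeric task-type values are hypothetical stand-ins.

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Hypothetical stand-ins for message.ChunkProof / message.BatchProof.
    type ChunkProof struct{ Proof []byte }
    type BatchProof struct{ Proof []byte }

    // decodeProof mirrors the per-task-type switch removed from the controller.
    func decodeProof(taskType int, raw string) (interface{}, error) {
    	switch taskType {
    	case 1: // chunk
    		var p ChunkProof
    		if err := json.Unmarshal([]byte(raw), &p); err != nil {
    			return nil, fmt.Errorf("unmarshal chunk proof: %w", err)
    		}
    		return &p, nil
    	case 2: // batch
    		var p BatchProof
    		if err := json.Unmarshal([]byte(raw), &p); err != nil {
    			return nil, fmt.Errorf("unmarshal batch proof: %w", err)
    		}
    		return &p, nil
    	}
    	return nil, fmt.Errorf("unknown task type %d", taskType)
    }

    func main() {
    	p, err := decodeProof(1, `{"Proof":"YWJj"}`)
    	fmt.Println(p, err)
    }
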
@@ -23,38 +23,55 @@ type Collector struct {
db *gorm.DB
ctx context.Context

stopChunkTimeoutChan chan struct{}
stopBatchTimeoutChan chan struct{}
stopBatchAllChunkReadyChan chan struct{}
stopCleanChallengeChan chan struct{}
stopBundleTimeoutChan chan struct{}
stopChunkTimeoutChan chan struct{}
stopBatchTimeoutChan chan struct{}
stopBatchAllChunkReadyChan chan struct{}
stopBundleAllBatchReadyChan chan struct{}
stopCleanChallengeChan chan struct{}

proverTaskOrm *orm.ProverTask
bundleOrm *orm.Bundle
chunkOrm *orm.Chunk
batchOrm *orm.Batch
challenge *orm.Challenge

timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
checkBatchAllChunkReadyRunTotal prometheus.Counter
timeoutBundleCheckerRunTotal prometheus.Counter
bundleProverTaskTimeoutTotal prometheus.Counter
timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
checkBatchAllChunkReadyRunTotal prometheus.Counter
checkBundleAllBatchReadyRunTotal prometheus.Counter
}

// NewCollector create a collector to cron collect the data to send to prover
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
c := &Collector{
cfg: cfg,
db: db,
ctx: ctx,
stopChunkTimeoutChan: make(chan struct{}),
stopBatchTimeoutChan: make(chan struct{}),
stopBatchAllChunkReadyChan: make(chan struct{}),
stopCleanChallengeChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
challenge: orm.NewChallenge(db),
cfg: cfg,
db: db,
ctx: ctx,
stopBundleTimeoutChan: make(chan struct{}),
stopChunkTimeoutChan: make(chan struct{}),
stopBatchTimeoutChan: make(chan struct{}),
stopBatchAllChunkReadyChan: make(chan struct{}),
stopBundleAllBatchReadyChan: make(chan struct{}),
stopCleanChallengeChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
challenge: orm.NewChallenge(db),

timeoutBundleCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_bundle_timeout_checker_run_total",
Help: "Total number of bundle timeout checker run.",
}),
bundleProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_bundle_prover_task_timeout_total",
Help: "Total number of bundle timeout prover task.",
}),
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_timeout_checker_run_total",
Help: "Total number of batch timeout checker run.",
@@ -75,11 +92,17 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
Name: "coordinator_check_batch_all_chunk_ready_run_total",
Help: "Total number of check batch all chunks ready total",
}),
checkBundleAllBatchReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_check_bundle_all_batch_ready_run_total",
Help: "Total number of check bundle all batches ready total",
}),
}

go c.timeoutBundleProofTask()
go c.timeoutBatchProofTask()
go c.timeoutChunkProofTask()
go c.checkBatchAllChunkReady()
go c.checkBundleAllBatchReady()
go c.cleanupChallenge()

log.Info("Start coordinator cron successfully.")
@@ -91,10 +114,45 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
func (c *Collector) Stop() {
c.stopChunkTimeoutChan <- struct{}{}
c.stopBatchTimeoutChan <- struct{}{}
c.stopBundleTimeoutChan <- struct{}{}
c.stopBatchAllChunkReadyChan <- struct{}{}
c.stopBundleAllBatchReadyChan <- struct{}{}
c.stopCleanChallengeChan <- struct{}{}
}

// timeoutBundleProofTask cron checks the send task is timeout. if timeout reached, restore the
// bundle task to unassigned. then the bundle collector can retry it.
func (c *Collector) timeoutBundleProofTask() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("timeout bundle proof task panic error:%v", err)
log.Warn(nerr.Error())
}
}()

ticker := time.NewTicker(time.Second * 2)
for {
select {
case <-ticker.C:
c.timeoutBundleCheckerRunTotal.Inc()
timeout := time.Duration(c.cfg.ProverManager.BundleCollectionTimeSec) * time.Second
assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeBundle, timeout)
if err != nil {
log.Error("get unassigned session info failure", "error", err)
break
}
c.check(assignedProverTasks, c.bundleProverTaskTimeoutTotal)
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopBundleTimeoutChan:
log.Info("the coordinator timeoutBundleProofTask run loop exit")
return
}
}
}

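Illustrative sketch (not part of this changeset): the new bundle checker follows the same ticker-plus-stop-channel loop shape as the existing chunk and batch loops. A self-contained version of that pattern, with none of the coordinator's actual types:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // runChecker sketches the loop shape used by timeoutBundleProofTask above:
    // tick, do work, and exit on either a canceled context or an explicit stop signal.
    func runChecker(ctx context.Context, stop <-chan struct{}, work func()) {
    	ticker := time.NewTicker(2 * time.Second)
    	defer ticker.Stop()
    	for {
    		select {
    		case <-ticker.C:
    			work()
    		case <-ctx.Done():
    			fmt.Println("context canceled:", ctx.Err())
    			return
    		case <-stop:
    			fmt.Println("checker stopped")
    			return
    		}
    	}
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	stop := make(chan struct{})
    	go runChecker(ctx, stop, func() { fmt.Println("checking timeouts") })
    	time.Sleep(5 * time.Second)
    	stop <- struct{}{} // mirrors Collector.Stop sending on the stop channel
    	cancel()
    	time.Sleep(100 * time.Millisecond) // let the goroutine print before exit
    }
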
// timeoutBatchProofTask cron check the send task is timeout. if timeout reached, restore the
// chunk/batch task to unassigned. then the batch/chunk collector can retry it.
func (c *Collector) timeoutBatchProofTask() {
@@ -202,6 +260,16 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
case message.ProofTypeBundle:
if err := c.bundleOrm.DecreaseActiveAttemptsByHash(c.ctx, assignedProverTask.TaskID, tx); err != nil {
log.Error("decrease bundle active attempts failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}

if err := c.bundleOrm.UpdateProvingStatusFailed(c.ctx, assignedProverTask.TaskID, c.cfg.ProverManager.SessionAttempts, tx); err != nil {
log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
}

return nil
@@ -268,3 +336,60 @@ func (c *Collector) checkBatchAllChunkReady() {
}
}
}

func (c *Collector) checkBundleAllBatchReady() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("check bundle all batches ready panic error:%v", err)
log.Warn(nerr.Error())
}
}()

ticker := time.NewTicker(time.Second * 10)
for {
select {
case <-ticker.C:
c.checkBundleAllBatchReadyRunTotal.Inc()
page := 1
pageSize := 50
for {
offset := (page - 1) * pageSize
bundles, err := c.bundleOrm.GetUnassignedAndBatchesUnreadyBundles(c.ctx, offset, pageSize)
if err != nil {
log.Warn("checkBundleAllBatchReady GetUnassignedAndBatchesUnreadyBundles", "error", err)
break
}

for _, bundle := range bundles {
allReady, checkErr := c.batchOrm.CheckIfBundleBatchProofsAreReady(c.ctx, bundle.Hash)
if checkErr != nil {
log.Warn("checkBundleAllBatchReady CheckIfBundleBatchProofsAreReady failure", "error", checkErr, "hash", bundle.Hash)
continue
}

if !allReady {
continue
}

if updateErr := c.bundleOrm.UpdateBatchProofsStatusByBatchHash(c.ctx, bundle.Hash, types.BatchProofsStatusReady); updateErr != nil {
log.Warn("checkBundleAllBatchReady UpdateBatchProofsStatusByBatchHash failure", "error", updateErr, "hash", bundle.Hash)
}
}

if len(bundles) < pageSize {
break
}
page++
}

case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopBundleAllBatchReadyChan:
log.Info("the coordinator checkBundleAllBatchReady run loop exit")
return
}
}
}

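The ready-check above pages through candidate bundles fifty at a time, with a short page (len < pageSize) signaling the end of the result set. The same offset/limit scan, extracted as a self-contained sketch (a generic helper, not code from the repo):

    package main

    import "fmt"

    // scanAll sketches the pagination loop used by checkBundleAllBatchReady:
    // fetch a page, process it, and stop once a short page indicates the end.
    func scanAll(fetch func(offset, limit int) []string, pageSize int, visit func(string)) {
    	page := 1
    	for {
    		offset := (page - 1) * pageSize
    		items := fetch(offset, pageSize)
    		for _, it := range items {
    			visit(it)
    		}
    		if len(items) < pageSize {
    			break
    		}
    		page++
    	}
    }

    func main() {
    	data := make([]string, 0, 120)
    	for i := 0; i < 120; i++ {
    		data = append(data, fmt.Sprintf("bundle-%03d", i))
    	}
    	fetch := func(offset, limit int) []string {
    		if offset >= len(data) {
    			return nil
    		}
    		end := offset + limit
    		if end > len(data) {
    			end = len(data)
    		}
    		return data[offset:end]
    	}
    	n := 0
    	scanAll(fetch, 50, func(string) { n++ })
    	fmt.Println("visited", n, "bundles") // visited 120 bundles
    }
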
@@ -1,25 +1,104 @@
package auth

import (
"errors"
"fmt"

"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/version"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/types"
)

// LoginLogic the auth logic
type LoginLogic struct {
cfg *config.Config
challengeOrm *orm.Challenge
chunkVks map[string]struct{}
batchVKs map[string]struct{}
bundleVks map[string]struct{}
}

// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB) *LoginLogic {
return &LoginLogic{
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
l := &LoginLogic{
cfg: cfg,
chunkVks: make(map[string]struct{}),
batchVKs: make(map[string]struct{}),
bundleVks: make(map[string]struct{}),
challengeOrm: orm.NewChallenge(db),
}

for _, vk := range vf.ChunkVKMap {
l.chunkVks[vk] = struct{}{}
}

for _, vk := range vf.BatchVKMap {
l.batchVKs[vk] = struct{}{}
}

for _, vk := range vf.BundleVkMap {
l.bundleVks[vk] = struct{}{}
}

return l
}

// InsertChallengeString insert and check the challenge string is existed
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}

func (l *LoginLogic) Check(login *types.LoginParameter) error {
if login.PublicKey != "" {
verify, err := login.Verify()
if err != nil || !verify {
return errors.New("auth message verify failure")
}
}

if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
l.cfg.ProverManager.MinProverVersion, login.Message.ProverVersion)
}

if len(login.Message.ProverTypes) > 0 {
vks := make(map[string]struct{})
for _, proverType := range login.Message.ProverTypes {
switch proverType {
case types.ProverTypeChunk:
for vk := range l.chunkVks {
vks[vk] = struct{}{}
}
case types.ProverTypeBatch:
for vk := range l.batchVKs {
vks[vk] = struct{}{}
}
for vk := range l.bundleVks {
vks[vk] = struct{}{}
}
default:
log.Error("invalid prover_type", "value", proverType)
}
}

for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports a same prover version
return fmt.Errorf("incompatible vk. please check your params files or config files")
}
}
}
return nil
}

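The new Check walks the prover's declared prover_types, unions the corresponding verifying-key sets (a batch prover is granted both batch and bundle VKs), and rejects any reported VK outside that union. A self-contained sketch of the set logic, with simplified types standing in for the coordinator's:

    package main

    import "fmt"

    type proverType int

    const (
    	proverTypeChunk proverType = iota + 1
    	proverTypeBatch
    )

    // allowedVKs mirrors the union built in Check above: a batch prover is
    // allowed both batch and bundle verifying keys.
    func allowedVKs(types []proverType, chunk, batch, bundle map[string]struct{}) map[string]struct{} {
    	vks := make(map[string]struct{})
    	add := func(m map[string]struct{}) {
    		for vk := range m {
    			vks[vk] = struct{}{}
    		}
    	}
    	for _, t := range types {
    		switch t {
    		case proverTypeChunk:
    			add(chunk)
    		case proverTypeBatch:
    			add(batch)
    			add(bundle)
    		}
    	}
    	return vks
    }

    func main() {
    	chunk := map[string]struct{}{"vk-chunk": {}}
    	batch := map[string]struct{}{"vk-batch": {}}
    	bundle := map[string]struct{}{"vk-bundle": {}}

    	vks := allowedVKs([]proverType{proverTypeBatch}, chunk, batch, bundle)
    	for _, reported := range []string{"vk-batch", "vk-bundle", "vk-chunk"} {
    		_, ok := vks[reported]
    		fmt.Println(reported, "accepted:", ok)
    	}
    }
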
@@ -4,12 +4,12 @@ import (
"context"
"encoding/json"
"fmt"
"math"
"time"

"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding/codecv3"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
@@ -35,18 +35,13 @@ type BatchProverTask struct {
}

// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)

func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
forkHeights: forkHeights,
chainCfg: chainCfg,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
@@ -65,38 +60,20 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return bp
}

type chunkIndexRange struct {
start uint64
end uint64
}

func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange {
var start, end = r.start, r.end
if o.start < r.start {
start = o.start
// Assign load and assign batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
if o.end > r.end {
end = o.end
}
return &chunkIndexRange{start, end}
}

func (r *chunkIndexRange) contains(start, end uint64) bool {
return r.start <= start && r.end > end
}

type getHardForkNameByBatchFunc func(*orm.Batch) (string, error)

func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCtx *proverTaskContext,
chunkRange *chunkIndexRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByBatchFunc) (*coordinatorType.GetTaskSchema, error) {
startChunkIndex, endChunkIndex := chunkRange.start, chunkRange.end
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
var batchTask *orm.Batch
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -105,7 +82,7 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
// batch to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -138,17 +115,12 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
}

log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
proverVersion = taskCtx.ProverVersion
hardForkName = taskCtx.HardForkName
)
var err error
if getHardForkName != nil {
hardForkName, err = getHardForkName(batchTask)
if err != nil {
log.Error("failed to get hard fork name by batch", "task_id", batchTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}

hardForkName, getHardForkErr := bp.hardForkName(ctx, batchTask)
if getHardForkErr != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("retrieve hard fork name by batch failed", "task_id", batchTask.Hash, "err", getHardForkErr)
return nil, ErrCoordinatorInternalFailure
}

proverTask := orm.ProverTask{
@@ -156,10 +128,10 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: proverVersion,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}

@@ -170,7 +142,7 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
return nil, ErrCoordinatorInternalFailure
}

taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
@@ -187,115 +159,21 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
return taskMsg, nil
}

func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
func (bp *BatchProverTask) hardForkName(ctx *gin.Context, batchTask *orm.Batch) (string, error) {
startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, batchTask.StartChunkHash)
if getChunkErr != nil {
return "", getChunkErr
}

// if the hard fork number set, rollup relayer must generate the chunk from hard fork number,
// so the hard fork chunk's start_block_number must be ForkBlockNumber
var startChunkIndex uint64 = 0
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", hardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
return nil, nil
}
startChunkIndex = startChunk.Index
l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", hardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
// toChunk being nil only indicates that we haven't yet reached the fork boundary
// don't need change the endChunkIndex of math.MaxInt64
endChunkIndex = toChunk.Index
}
}
return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil
hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
}

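Fork resolution is now data-driven: the batch's first chunk leads to its first L2 block, and forks.GetHardforkName maps that block's number and timestamp onto a fork name via the chain config. A self-contained sketch of the same idea; the schedule, activation points, and lookup below are hypothetical stand-ins for forks.GetHardforkName and params.ChainConfig.

    package main

    import "fmt"

    // fork is a hypothetical, simplified stand-in for the chain-config-driven
    // lookup performed by forks.GetHardforkName(chainCfg, number, timestamp).
    type fork struct {
    	name      string
    	blockNum  uint64 // activation height (0 = genesis)
    	timestamp uint64 // activation time for time-based forks (0 = none)
    }

    func hardforkName(schedule []fork, number, timestamp uint64) string {
    	name := schedule[0].name
    	for _, f := range schedule {
    		if number >= f.blockNum && timestamp >= f.timestamp {
    			name = f.name // later forks override earlier ones
    		}
    	}
    	return name
    }

    func main() {
    	// Illustrative schedule only; real activation points live in params.ChainConfig.
    	schedule := []fork{
    		{"shanghai", 0, 0},
    		{"bernoulli", 1_000_000, 0},
    		{"curie", 2_000_000, 0},
    		{"darwin", 0, 1_720_000_000}, // time-activated fork
    	}
    	fmt.Println(hardforkName(schedule, 2_500_000, 1_700_000_000)) // curie
    	fmt.Println(hardforkName(schedule, 2_500_000, 1_725_000_000)) // darwin
    }
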
func (bp *BatchProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
chunkRange, err := bp.getChunkRangeByName(ctx, taskCtx.HardForkName)
if err != nil {
return nil, err
}
if chunkRange == nil {
return nil, nil
}
return bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, nil)
}

func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
chunkRanges [2]*chunkIndexRange
err error
)
var chunkRange *chunkIndexRange
for i := 0; i < 2; i++ {
hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
if err != nil {
return nil, err
}
if chunkRanges[i] != nil {
if chunkRange == nil {
chunkRange = chunkRanges[i]
} else {
chunkRange = chunkRange.merge(*chunkRanges[i])
}
}
}
if chunkRange == nil {
return nil, nil
}
var hardForkName string
getHardForkName := func(batch *orm.Batch) (string, error) {
for i := 0; i < 2; i++ {
if chunkRanges[i] != nil && chunkRanges[i].contains(batch.StartChunkIndex, batch.EndChunkIndex) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("get batch not belongs to any hard fork name", "batch id", batch.Index)
return "", fmt.Errorf("get batch not belongs to any hard fork name, batch id: %d", batch.Index)
}
return hardForkName, nil
}
schema, err := bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}

// Assign load and assign batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}

if len(getTaskParameter.VKs) > 0 {
return bp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return bp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}

func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, batch *orm.Batch, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
// get chunk from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
if err != nil {
@@ -303,6 +181,10 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return nil, err
}

if len(chunks) == 0 {
return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
}

var chunkProofs []*message.ChunkProof
var chunkInfos []*message.ChunkInfo
for _, chunk := range chunks {
@@ -331,16 +213,25 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
ChunkProofs: chunkProofs,
}

if hardForkName == "darwin" {
batchHeader, decodeErr := codecv3.NewDABatchFromBytes(batch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header, taskID:%s err:%w", task.TaskID, decodeErr)
}
taskDetail.BatchHeader = batchHeader
}

chunkProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", task.TaskID, err)
}

taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
HardForkName: hardForkName,
}
return taskMsg, nil
}

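Only darwin-era batch tasks carry a decoded codecv3 batch header; earlier forks keep the old payload shape. A sketch of that fork-gated inclusion, with hypothetical detail and header types (the real ones are the message task detail and the codecv3 DABatch used above):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Hypothetical stand-ins for the real task detail and decoded header types.
    type batchHeader struct{ Version uint8 }
    type batchTaskDetail struct {
    	ChunkProofs []string     `json:"chunk_proofs"`
    	BatchHeader *batchHeader `json:"batch_header,omitempty"`
    }

    // buildTaskData mirrors the gating above: decode and attach the header
    // only when the task's fork is darwin.
    func buildTaskData(hardForkName string, rawHeader []byte) ([]byte, error) {
    	detail := batchTaskDetail{ChunkProofs: []string{"proof-0"}}
    	if hardForkName == "darwin" {
    		// Stand-in for codecv3.NewDABatchFromBytes(batch.BatchHeader).
    		if len(rawHeader) == 0 {
    			return nil, fmt.Errorf("empty batch header")
    		}
    		detail.BatchHeader = &batchHeader{Version: rawHeader[0]}
    	}
    	return json.Marshal(detail)
    }

    func main() {
    	pre, _ := buildTaskData("curie", []byte{3})
    	post, _ := buildTaskData("darwin", []byte{3})
    	fmt.Println(string(pre))
    	fmt.Println(string(post))
    }
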
225
coordinator/internal/logic/provertask/bundle_prover_task.go
Normal file
@@ -0,0 +1,225 @@
package provertask

import (
"context"
"encoding/json"
"fmt"
"time"

"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"

"scroll-tech/common/forks"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)

// BundleProverTask is prover task implement for bundle proof
type BundleProverTask struct {
BaseProverTask

bundleAttemptsExceedTotal prometheus.Counter
bundleTaskGetTaskTotal *prometheus.CounterVec
bundleTaskGetTaskProver *prometheus.CounterVec
}

// NewBundleProverTask new a bundle collector
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProverTask {
bp := &BundleProverTask{
BaseProverTask: BaseProverTask{
db: db,
chainCfg: chainCfg,
cfg: cfg,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
proverTaskOrm: orm.NewProverTask(db),
proverBlockListOrm: orm.NewProverBlockList(db),
},
bundleAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_bundle_attempts_exceed_total",
Help: "Total number of bundle attempts exceed.",
}),
bundleTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_bundle_get_task_total",
Help: "Total number of bundle get task.",
}, []string{"fork_name"}),
bundleTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "bundle"),
}
return bp
}

// Assign load and assign bundle tasks
func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}

maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
var bundleTask *orm.Bundle
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBundleTask *orm.Bundle
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}

// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
// bundle to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
if tmpBundleTask == nil {
tmpBundleTask, getTaskError = bp.bundleOrm.GetUnassignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}

if tmpBundleTask == nil {
log.Debug("get empty bundle", "height", getTaskParameter.ProverHeight)
return nil, nil
}

rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}

if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}

bundleTask = tmpBundleTask
break
}

if bundleTask == nil {
log.Debug("get empty unassigned bundle after retry 5 times", "height", getTaskParameter.ProverHeight)
return nil, nil
}

log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)

hardForkName, getHardForkErr := bp.hardForkName(ctx, bundleTask)
if getHardForkErr != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("retrieve hard fork name by bundle failed", "task_id", bundleTask.Hash, "err", getHardForkErr)
return nil, ErrCoordinatorInternalFailure
}

proverTask := orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}

// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}

taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}

bp.bundleTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.bundleTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()

return taskMsg, nil
}

func (bp *BundleProverTask) hardForkName(ctx *gin.Context, bundleTask *orm.Bundle) (string, error) {
startBatch, getBatchErr := bp.batchOrm.GetBatchByHash(ctx, bundleTask.StartBatchHash)
if getBatchErr != nil {
return "", getBatchErr
}

startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, startBatch.StartChunkHash)
if getChunkErr != nil {
return "", getChunkErr
}

l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}

hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
}

func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
// get bundle from db
batches, err := bp.batchOrm.GetBatchesByBundleHash(ctx, task.TaskID)
if err != nil {
err = fmt.Errorf("failed to get batch proofs for bundle task id:%s err:%w ", task.TaskID, err)
return nil, err
}

if len(batches) == 0 {
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}

var batchProofs []*message.BatchProof
for _, batch := range batches {
var proof message.BatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
}
batchProofs = append(batchProofs, &proof)
}

taskDetail := message.BundleTaskDetail{
BatchProofs: batchProofs,
}

batchProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", task.TaskID, err)
}

taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),
HardForkName: hardForkName,
}
return taskMsg, nil
}

func (bp *BundleProverTask) recoverActiveAttempts(ctx *gin.Context, bundleTask *orm.Bundle) {
if err := bp.bundleOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), bundleTask.Hash); err != nil {
log.Error("failed to recover bundle active attempts", "hash", bundleTask.Hash, "error", err)
}
}

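Illustrative sketch (not part of this changeset): a bundle task's TaskData is the JSON of message.BundleTaskDetail, i.e. the ordered batch proofs. A hypothetical prover-side decode follows; the simplified types and json tags are assumptions, not taken from the repo.

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Simplified stand-ins for message.BatchProof / message.BundleTaskDetail.
    type batchProof struct {
    	Proof []byte `json:"proof"`
    }
    type bundleTaskDetail struct {
    	BatchProofs []*batchProof `json:"batch_proofs"`
    }

    func main() {
    	// Illustrative TaskData payload as produced by formatProverTask above.
    	taskData := `{"batch_proofs":[{"proof":"YWJj"},{"proof":"ZGVm"}]}`
    	var detail bundleTaskDetail
    	if err := json.Unmarshal([]byte(taskData), &detail); err != nil {
    		panic(err)
    	}
    	fmt.Println("batches in bundle:", len(detail.BatchProofs))
    }
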
@@ -33,17 +33,12 @@ type ChunkProverTask struct {
|
||||
}
|
||||
|
||||
// NewChunkProverTask new a chunk prover task
|
||||
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
|
||||
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
|
||||
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
|
||||
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
|
||||
cp := &ChunkProverTask{
|
||||
BaseProverTask: BaseProverTask{
|
||||
vkMap: vkMap,
|
||||
reverseVkMap: reverseMap(vkMap),
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
nameForkMap: nameForkMap,
|
||||
forkHeights: forkHeights,
|
||||
chainCfg: chainCfg,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
blockOrm: orm.NewL2Block(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
@@ -62,13 +57,11 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
|
||||
return cp
|
||||
}
|
||||
|
||||
type getHardForkNameByChunkFunc func(*orm.Chunk) (string, error)
|
||||
|
||||
func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCtx *proverTaskContext,
|
||||
blockRange *blockRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByChunkFunc) (*coordinatorType.GetTaskSchema, error) {
|
||||
fromBlockNum, toBlockNum := blockRange.from, blockRange.to
|
||||
if toBlockNum > getTaskParameter.ProverHeight {
|
||||
toBlockNum = getTaskParameter.ProverHeight + 1
|
||||
// Assign the chunk proof which need to prove
|
||||
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
|
||||
taskCtx, err := cp.checkParameter(ctx)
|
||||
if err != nil || taskCtx == nil {
|
||||
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
|
||||
}
|
||||
|
||||
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
|
||||
@@ -77,7 +70,7 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpChunkTask *orm.Chunk
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -86,7 +79,7 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
|
||||
// chunk to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
|
||||
if tmpChunkTask == nil {
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -119,17 +112,12 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
}
|
||||
|
||||
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
|
||||
var (
|
||||
proverVersion = taskCtx.ProverVersion
|
||||
hardForkName = taskCtx.HardForkName
|
||||
err error
|
||||
)
|
||||
if getHardForkName != nil {
|
||||
hardForkName, err = getHardForkName(chunkTask)
|
||||
if err != nil {
|
||||
log.Error("failed to get hard fork name by chunk", "task_id", chunkTask.Hash, "error", err.Error())
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
hardForkName, getHardForkErr := cp.hardForkName(ctx, chunkTask)
|
||||
if getHardForkErr != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("retrieve hard fork name by chunk failed", "task_id", chunkTask.Hash, "err", getHardForkErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
proverTask := orm.ProverTask{
|
||||
@@ -137,10 +125,10 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
TaskType: int16(message.ProofTypeChunk),
|
||||
ProverName: taskCtx.ProverName,
|
||||
ProverVersion: proverVersion,
|
||||
ProverVersion: taskCtx.ProverVersion,
|
||||
ProvingStatus: int16(types.ProverAssigned),
|
||||
FailureType: int16(types.ProverTaskFailureTypeUndefined),
|
||||
// here why need use UTC time. see scroll/common/databased/db.go
|
||||
// here why need use UTC time. see scroll/common/database/db.go
|
||||
AssignedAt: utils.NowUTC(),
|
||||
}
|
||||
|
||||
@@ -150,7 +138,7 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
|
||||
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
|
||||
if err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
|
||||
@@ -167,96 +155,16 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
return taskMsg, nil
|
||||
}
|
||||
|
||||
func (cp *ChunkProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
	blockRange, err := cp.getBlockRangeByName(taskCtx.HardForkName)
	if err != nil {
		return nil, err
func (cp *ChunkProverTask) hardForkName(ctx *gin.Context, chunkTask *orm.Chunk) (string, error) {
	l2Block, getBlockErr := cp.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunkTask.StartBlockNumber)
	if getBlockErr != nil {
		return "", getBlockErr
	}
	return cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, nil)
	hardForkName := forks.GetHardforkName(cp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
	return hardForkName, nil
}

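The replacement helper derives a chunk's hard fork from its first L2 block, checking the block's number and timestamp against the chain config instead of the coordinator's old static fork-height table. A minimal sketch of that selection rule, with assumed fork fields (the real logic is forks.GetHardforkName in scroll-tech/go-ethereum, which this diff only calls):

package forkssketch

import "math/big"

// chainConfig stands in for params.ChainConfig; the field names here are
// assumptions for illustration, not the real struct.
type chainConfig struct {
	BernoulliBlock *big.Int // fork activated by block number
	CurieBlock     *big.Int // fork activated by block number
	DarwinTime     *uint64  // fork activated by block timestamp
}

// getHardforkName returns the latest fork active at (number, timestamp),
// mirroring how forks.GetHardforkName is called above.
func getHardforkName(cfg *chainConfig, number, timestamp uint64) string {
	n := new(big.Int).SetUint64(number)
	switch {
	case cfg.DarwinTime != nil && timestamp >= *cfg.DarwinTime:
		return "darwin"
	case cfg.CurieBlock != nil && n.Cmp(cfg.CurieBlock) >= 0:
		return "curie"
	case cfg.BernoulliBlock != nil && n.Cmp(cfg.BernoulliBlock) >= 0:
		return "bernoulli"
	default:
		return "london"
	}
}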
func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
	var (
		hardForkNames [2]string
		blockRanges   [2]*blockRange
		err           error
	)
	for i := 0; i < 2; i++ {
		hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
		blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
		if err != nil {
			return nil, err
		}
	}
	blockRange, err := blockRanges[0].merge(*blockRanges[1])
	if err != nil {
		return nil, err
	}
	var hardForkName string
	getHardForkName := func(chunk *orm.Chunk) (string, error) {
		for i := 0; i < 2; i++ {
			if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
				hardForkName = hardForkNames[i]
				break
			}
		}
		if hardForkName == "" {
			log.Warn("get chunk not belongs to any hard fork name", "chunk id", chunk.Index)
			return "", fmt.Errorf("get chunk not belongs to any hard fork name, chunk id: %d", chunk.Index)
		}
		return hardForkName, nil
	}
	schema, err := cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, getHardForkName)
	if schema != nil && err == nil {
		schema.HardForkName = hardForkName
		return schema, nil
	}
	return schema, err
}

type blockRange struct {
	from uint64
	to   uint64
}

func (r *blockRange) merge(o blockRange) (*blockRange, error) {
	if r.from == o.to {
		return &blockRange{o.from, r.to}, nil
	} else if r.to == o.from {
		return &blockRange{r.from, o.to}, nil
	}
	return nil, fmt.Errorf("two ranges are not adjacent")
}

func (r *blockRange) contains(start, end uint64) bool {
	return r.from <= start && r.to > end
}

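For context on the two-circuit path being deleted here: merge only joins half-open [from, to) ranges that touch end-to-start, and contains requires the whole chunk to sit inside one range, with to exclusive. A self-contained recap of those semantics, taken directly from the removed code:

package main

import "fmt"

type blockRange struct{ from, to uint64 }

// merge joins two adjacent half-open ranges; a gap or overlap is an error.
func (r *blockRange) merge(o blockRange) (*blockRange, error) {
	if r.from == o.to {
		return &blockRange{o.from, r.to}, nil
	} else if r.to == o.from {
		return &blockRange{r.from, o.to}, nil
	}
	return nil, fmt.Errorf("two ranges are not adjacent")
}

// contains reports whether the chunk [start, end] lies fully inside the range.
func (r *blockRange) contains(start, end uint64) bool {
	return r.from <= start && r.to > end
}

func main() {
	a := &blockRange{from: 100, to: 200}
	merged, err := a.merge(blockRange{from: 200, to: 300})
	fmt.Println(merged, err)               // &{100 300} <nil>
	fmt.Println(merged.contains(150, 299)) // true
	fmt.Println(merged.contains(150, 300)) // false: `to` is exclusive
}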
func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
	hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
	if err != nil {
		log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
		return nil, err
	}

	fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
	return &blockRange{fromBlockNum, toBlockNum}, nil
}

// Assign the chunk proof which need to prove
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
	taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
	if err != nil || taskCtx == nil {
		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
	}

	if len(getTaskParameter.VKs) > 0 {
		return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
	}
	return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}

func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
	// Get block hashes.
	blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
	if dbErr != nil || len(blockHashes) == 0 {
@@ -272,10 +180,11 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
	}

	proverTaskSchema := &coordinatorType.GetTaskSchema{
		UUID:     task.UUID.String(),
		TaskID:   task.TaskID,
		TaskType: int(message.ProofTypeChunk),
		TaskData: string(blockHashesBytes),
		UUID:         task.UUID.String(),
		TaskID:       task.TaskID,
		TaskType:     int(message.ProofTypeChunk),
		TaskData:     string(blockHashesBytes),
		HardForkName: hardForkName,
	}

	return proverTaskSchema, nil

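With HardForkName carried in the task schema, a prover can select its circuit straight from the assignment. An abridged sketch of the schema shape implied by the diff; the JSON tags are assumptions:

// Hypothetical, abridged sketch of coordinatorType.GetTaskSchema.
type GetTaskSchema struct {
	UUID         string `json:"uuid"`
	TaskID       string `json:"task_id"`
	TaskType     int    `json:"task_type"`
	TaskData     string `json:"task_data"`
	HardForkName string `json:"hard_fork_name"` // new: tells the prover which circuit to load
}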
@@ -7,11 +7,9 @@ import (
	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/params"
	"gorm.io/gorm"

	"scroll-tech/common/version"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/orm"
	coordinatorType "scroll-tech/coordinator/internal/types"
@@ -24,35 +22,25 @@ var (
	ErrHardForkName = fmt.Errorf("wrong hard fork name")
)

var (
	getTaskCounterInitOnce sync.Once
	getTaskCounterVec      *prometheus.CounterVec = nil
)

// ProverTask the interface of a collector who send data to prover
type ProverTask interface {
	Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
}

func reverseMap(input map[string]string) map[string]string {
	output := make(map[string]string, len(input))
	for k, v := range input {
		if k != "" {
			output[v] = k
		}
	}
	return output
}

// BaseProverTask a base prover task which contain series functions
type BaseProverTask struct {
	cfg *config.Config
	db  *gorm.DB

	// key is hardForkName, value is vk
	vkMap map[string]string
	// key is vk, value is hardForkName
	reverseVkMap map[string]string
	nameForkMap  map[string]uint64
	forkHeights  []uint64
	cfg      *config.Config
	chainCfg *params.ChainConfig
	db       *gorm.DB

	batchOrm           *orm.Batch
	chunkOrm           *orm.Chunk
	bundleOrm          *orm.Bundle
	blockOrm           *orm.L2Block
	proverTaskOrm      *orm.ProverTask
	proverBlockListOrm *orm.ProverBlockList
@@ -62,11 +50,10 @@ type proverTaskContext struct {
	PublicKey     string
	ProverName    string
	ProverVersion string
	HardForkName  string
}

// checkParameter check the prover task parameter illegal
func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*proverTaskContext, error) {
func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, error) {
	var ptc proverTaskContext

	publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
@@ -87,44 +74,6 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
	}
	ptc.ProverVersion = proverVersion.(string)

	if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
		return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
	}

	// signals that the prover is multi-circuits version
	if len(getTaskParameter.VKs) > 0 {
		if len(getTaskParameter.VKs) != 2 {
			return nil, fmt.Errorf("parameter vks length must be 2")
		}
		for _, vk := range getTaskParameter.VKs {
			if _, exists := b.reverseVkMap[vk]; !exists {
				return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk)
			}
		}
	} else {
		hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
		if !hardForkNameExist {
			return nil, fmt.Errorf("get hard fork name from context failed")
		}
		ptc.HardForkName = hardForkName.(string)

		vk, vkExist := b.vkMap[ptc.HardForkName]
		if !vkExist {
			return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
		}

		// if the prover has a different vk
		if getTaskParameter.VK != vk {
			log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
			// if the prover reports a different prover version
			if !version.CheckScrollProverVersion(proverVersion.(string)) {
				return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
			}
			// if the prover reports a same prover version
			return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
		}
	}

	isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
	if err != nil {
		return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
@@ -144,26 +93,6 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
	return &ptc, nil
}

func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error) {
	// when the first hard fork upgrade, the prover don't pass the fork_name to coordinator.
	// so coordinator need to be compatible.
	if forkName == "" {
		return 0, nil
	}

	hardForkNumber, exist := b.nameForkMap[forkName]
	if !exist {
		return 0, ErrHardForkName
	}

	return hardForkNumber, nil
}

var (
	getTaskCounterInitOnce sync.Once
	getTaskCounterVec      *prometheus.CounterVec = nil
)

func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
	getTaskCounterInitOnce.Do(func() {
		getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{

@@ -12,8 +12,10 @@ import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/params"
	"gorm.io/gorm"

	"scroll-tech/common/forks"
	"scroll-tech/common/types"
	"scroll-tech/common/types/message"

@@ -35,21 +37,26 @@ var (
	// ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success
	ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
	// ErrValidatorFailureVerifiedFailed failed to verify and the verifier returns error
	ErrValidatorFailureVerifiedFailed = fmt.Errorf("verification failed, verifier returns error")
	ErrValidatorFailureVerifiedFailed = errors.New("verification failed, verifier returns error")
	// ErrValidatorSuccessInvalidProof successful verified and the proof is invalid
	ErrValidatorSuccessInvalidProof = fmt.Errorf("verification succeeded, it's an invalid proof")
	ErrValidatorSuccessInvalidProof = errors.New("verification succeeded, it's an invalid proof")
	// ErrGetHardForkNameFailed failed to get hard fork name
	ErrGetHardForkNameFailed = errors.New("failed to get hard fork name")
	// ErrCoordinatorInternalFailure coordinator internal db failure
	ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
	ErrCoordinatorInternalFailure = errors.New("coordinator internal error")
)

// ProofReceiverLogic the proof receiver logic
type ProofReceiverLogic struct {
	chunkOrm      *orm.Chunk
	batchOrm      *orm.Batch
	bundleOrm     *orm.Bundle
	blockOrm      *orm.L2Block
	proverTaskOrm *orm.ProverTask

	db  *gorm.DB
	cfg *config.ProverManager
	db       *gorm.DB
	cfg      *config.ProverManager
	chainCfg *params.ChainConfig

	verifier *verifier.Verifier

@@ -66,14 +73,17 @@ type ProofReceiverLogic struct {
}

// NewSubmitProofReceiverLogic create a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
	return &ProofReceiverLogic{
		chunkOrm:      orm.NewChunk(db),
		batchOrm:      orm.NewBatch(db),
		bundleOrm:     orm.NewBundle(db),
		blockOrm:      orm.NewL2Block(db),
		proverTaskOrm: orm.NewProverTask(db),

		cfg: cfg,
		db:  db,
		cfg:      cfg,
		chainCfg: chainCfg,
		db:       db,

		verifier: vf,

@@ -124,7 +134,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, vf *ver
// HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) error {
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error {
	m.proofReceivedTotal.Inc()
	pk := ctx.GetString(coordinatorType.PublicKey)
	if len(pk) == 0 {
@@ -134,37 +144,20 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
	if len(pv) == 0 {
		return fmt.Errorf("get ProverVersion from context failed")
	}
	// use hard_fork_name from parameter first
	// if prover support multi hard_forks, the real hard_fork_name is not set to the gin context
	hardForkName := proofParameter.HardForkName
	if hardForkName == "" {
		hardForkName = ctx.GetString(coordinatorType.HardForkName)
	}

	var proverTask *orm.ProverTask
	var err error
	if proofParameter.UUID != "" {
		proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
		if proverTask == nil || err != nil {
			log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
			return ErrValidatorFailureProverTaskEmpty
		}
	} else {
		// TODO When prover all have upgrade, need delete this logic
		proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv)
		if proverTask == nil || err != nil {
			log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
			return ErrValidatorFailureProverTaskEmpty
		}
	proverTask, err := m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
	if proverTask == nil || err != nil {
		log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofParameter.TaskID, "error", err)
		return ErrValidatorFailureProverTaskEmpty
	}

	proofTime := time.Since(proverTask.CreatedAt)
	proofTimeSec := uint64(proofTime.Seconds())

	log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
		"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
	log.Info("handling zk proof", "proofID", proofParameter.TaskID, "proverName", proverTask.ProverName,
		"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)

	if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
	if err = m.validator(ctx.Copy(), proverTask, pk, proofParameter); err != nil {
		return err
	}

@@ -172,18 +165,39 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P

	success := true
	var verifyErr error
	// only verify batch proof. chunk proof verifier have been disabled after Bernoulli
	if proofMsg.Type == message.ProofTypeBatch {
		success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
	hardForkName, getHardForkErr := m.hardForkName(ctx, proofParameter.TaskID, proofParameter.TaskType)
	if getHardForkErr != nil {
		return ErrGetHardForkNameFailed
	}

	switch message.ProofType(proofParameter.TaskType) {
	case message.ProofTypeChunk:
		var chunkProof message.ChunkProof
		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
			return unmarshalErr
		}
		success, verifyErr = m.verifier.VerifyChunkProof(&chunkProof, hardForkName)
	case message.ProofTypeBatch:
		var batchProof message.BatchProof
		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
			return unmarshalErr
		}
		success, verifyErr = m.verifier.VerifyBatchProof(&batchProof, hardForkName)
	case message.ProofTypeBundle:
		var bundleProof message.BundleProof
		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
			return unmarshalErr
		}
		success, verifyErr = m.verifier.VerifyBundleProof(&bundleProof)
	}

	if verifyErr != nil || !success {
		m.verifierFailureTotal.WithLabelValues(pv).Inc()

		m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
		m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofParameter)

		log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
			"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
		log.Info("proof verified by coordinator failed", "proof id", proofParameter.TaskID, "prover name", proverTask.ProverName,
			"prover pk", pk, "prove type", proofParameter.TaskType, "proof time", proofTimeSec, "error", verifyErr)

		if verifyErr != nil {
			return ErrValidatorFailureVerifiedFailed
@@ -193,13 +207,13 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P

	m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())

	log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
		"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
	log.Info("proof verified and valid", "proof id", proofParameter.TaskID, "prover name", proverTask.ProverName,
		"prover pk", pk, "prove type", proofParameter.TaskType, "proof time", proofTimeSec)

	if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil {
	if err := m.closeProofTask(ctx.Copy(), proverTask, proofParameter, proofTimeSec); err != nil {
		m.proofSubmitFailure.Inc()

		m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
		m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofParameter)

		return ErrCoordinatorInternalFailure
	}
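The reworked HandleZkProof re-derives the fork name server-side and type-switches on the submitted task type, so the proof travels as an opaque JSON string that is parsed only once its concrete type is known. A condensed sketch of that dispatch shape, using a stand-in verifier interface and assumed enum values:

package sketch

import "encoding/json"

type ChunkProof struct{ /* fields elided */ }
type BatchProof struct{ /* fields elided */ }

// Verifier is a hypothetical stand-in for the coordinator's verifier.
type Verifier interface {
	VerifyChunkProof(p *ChunkProof, forkName string) (bool, error)
	VerifyBatchProof(p *BatchProof, forkName string) (bool, error)
}

const (
	proofTypeChunk = 1 // assumed enum values for illustration
	proofTypeBatch = 2
)

// verify unmarshals the raw proof only after the task type is known.
func verify(v Verifier, taskType int, rawProof []byte, forkName string) (bool, error) {
	switch taskType {
	case proofTypeChunk:
		var p ChunkProof
		if err := json.Unmarshal(rawProof, &p); err != nil {
			return false, err
		}
		return v.VerifyChunkProof(&p, forkName)
	case proofTypeBatch:
		var p BatchProof
		if err := json.Unmarshal(rawProof, &p); err != nil {
			return false, err
		}
		return v.VerifyBatchProof(&p, forkName)
	}
	return false, nil
}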
@@ -212,7 +226,6 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
	if err != nil {
		return err
	}

	allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batch.BatchHash)
	if err != nil {
		return err
@@ -226,7 +239,7 @@ ...
	return nil
}

func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofParameter coordinatorType.SubmitProofParameter) (err error) {
	defer func() {
		if err != nil {
			m.validateFailureTotal.Inc()
@@ -243,9 +256,9 @@ ...
		// (ii) set the maximum failure retry times
		log.Warn(
			"cannot submit valid proof for a prover task twice",
			"taskType", proverTask.TaskType, "hash", proofMsg.ID,
			"taskType", proverTask.TaskType, "hash", proofParameter.TaskID,
			"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
			"proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
			"proverPublicKey", proverTask.ProverPublicKey,
		)
		return ErrValidatorFailureProverTaskCannotSubmitTwice
	}
@@ -253,59 +266,59 @@ ...
	proofTime := time.Since(proverTask.CreatedAt)
	proofTimeSec := uint64(proofTime.Seconds())

	if proofMsg.Status != message.StatusOk {
	if proofParameter.Status != int(message.StatusOk) {
		// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
		failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)

		m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofMsg)
		m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofParameter)

		m.validateFailureProverTaskStatusNotOk.Inc()

		log.Info("proof generated by prover failed",
			"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
			"taskType", proofParameter.TaskType, "hash", proofParameter.TaskID, "proverName", proverTask.ProverName,
			"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
			"failureMessage", failureMsg, "forkName", forkName)
			"failureMessage", failureMsg)
		return ErrValidatorFailureProofMsgStatusNotOk
	}

	// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
	if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
		m.validateFailureProverTaskTimeout.Inc()
		log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
			"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
		log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
			"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
		return ErrValidatorFailureProofTimeout
	}

	// store the proof to prover task
	if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
		log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
	if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil {
		log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk,
			"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
	}

	// if the batch/chunk have proved and verifier success, need skip this submit proof
	if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
	if m.checkIsTaskSuccess(ctx, proofParameter.TaskID, message.ProofType(proofParameter.TaskType)) {
		m.validateFailureProverTaskHaveVerifier.Inc()
		log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
			"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
		log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofParameter.TaskID,
			"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
		return ErrValidatorFailureTaskHaveVerifiedSuccess
	}
	return nil
}

func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofMsg *message.ProofMsg) {
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofParameter coordinatorType.SubmitProofParameter) {
	log.Info("proof recover update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
		"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskUnassigned.String())

	if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofInvalid, failureType, 0); err != nil {
	if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofInvalid, failureType, 0); err != nil {
		log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey, "error", err)
	}
}

func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofParameter coordinatorType.SubmitProofParameter, proofTimeSec uint64) error {
	log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
		"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())

	if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
	if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
		log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
		return err
	}
@@ -314,14 +327,14 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm

// UpdateProofStatus update the chunk/batch task and session info status
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask,
	proofMsg *message.ProofMsg, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error {
	proofParameter coordinatorType.SubmitProofParameter, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error {
	err := m.db.Transaction(func(tx *gorm.DB) error {
		if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatusAndFailureType(ctx, proverTask.UUID, status, failureType, tx); updateErr != nil {
			log.Error("failed to update prover task proving status and failure type", "uuid", proverTask.UUID, "error", updateErr)
			return updateErr
		}

		switch proofMsg.Type {
		switch message.ProofType(proofParameter.TaskType) {
		case message.ProofTypeChunk:
			if err := m.chunkOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
				log.Error("failed to update chunk proving_status as failed", "hash", proverTask.TaskID, "error", err)
@@ -332,21 +345,28 @@ ...
			log.Error("failed to update batch proving_status as failed", "hash", proverTask.TaskID, "error", err)
			return err
		}
		case message.ProofTypeBundle:
			if err := m.bundleOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
				log.Error("failed to update bundle proving_status as failed", "hash", proverTask.TaskID, "error", err)
				return err
			}
		}

		// if the block batch has proof verified, so the failed status not update block batch proving status
		if m.checkIsTaskSuccess(ctx, proverTask.TaskID, proofMsg.Type) {
		if m.checkIsTaskSuccess(ctx, proverTask.TaskID, message.ProofType(proofParameter.TaskType)) {
			log.Info("update proof status skip because this chunk/batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey)
			return nil
		}

		if status == types.ProverProofValid {
			var storeProofErr error
			switch proofMsg.Type {
			switch message.ProofType(proofParameter.TaskType) {
			case message.ProofTypeChunk:
				storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, types.ProvingTaskVerified, proofTimeSec, tx)
				storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx)
			case message.ProofTypeBatch:
				storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.BatchProof, types.ProvingTaskVerified, proofTimeSec, tx)
				storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx)
			case message.ProofTypeBundle:
				storeProofErr = m.bundleOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx)
			}
			if storeProofErr != nil {
				log.Error("failed to store chunk/batch proof and proving status", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr)
@@ -360,7 +380,7 @@ ...
		return err
	}

	if status == types.ProverProofValid && proofMsg.Type == message.ProofTypeChunk {
	if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk {
		if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
			log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
			return checkReadyErr
@@ -385,24 +405,63 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
	if err != nil {
		return false
	}
	case message.ProofTypeBundle:
		provingStatus, err = m.bundleOrm.GetProvingStatusByHash(ctx, hash)
		if err != nil {
			return false
		}
	}

	return provingStatus == types.ProvingTaskVerified
}

func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) error {
	// store the proof to prover task
	var proofBytes []byte
	var marshalErr error
	switch proofMsg.Type {
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofParameter coordinatorType.SubmitProofParameter) error {
	return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, []byte(proofParameter.Proof))
}

func (m *ProofReceiverLogic) hardForkName(ctx *gin.Context, hash string, proofType int) (string, error) {
	var (
		bundle *orm.Bundle
		batch  *orm.Batch
		chunk  *orm.Chunk
		err    error
	)

	switch message.ProofType(proofType) {
	case message.ProofTypeChunk:
		proofBytes, marshalErr = json.Marshal(proofMsg.ChunkProof)
		chunk, err = m.chunkOrm.GetChunkByHash(ctx, hash)
	case message.ProofTypeBatch:
		proofBytes, marshalErr = json.Marshal(proofMsg.BatchProof)
		batch, err = m.batchOrm.GetBatchByHash(ctx, hash)
	case message.ProofTypeBundle:
		bundle, err = m.bundleOrm.GetBundleByHash(ctx, hash)
	}

	if len(proofBytes) == 0 || marshalErr != nil {
		return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr)
	if err != nil {
		return "", err
	}
	return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, proofBytes)

	if bundle != nil {
		batch, err = m.batchOrm.GetBatchByHash(ctx, bundle.StartBatchHash)
		if err != nil {
			return "", err
		}
	}

	if batch != nil {
		chunk, err = m.chunkOrm.GetChunkByHash(ctx, batch.StartChunkHash)
		if err != nil {
			return "", err
		}
	}

	if chunk == nil {
		return "", errors.New("failed to find chunk")
	}

	l2Block, getBlockErr := m.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunk.StartBlockNumber)
	if getBlockErr != nil {
		return "", getBlockErr
	}

	hardForkName := forks.GetHardforkName(m.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
	return hardForkName, nil
}

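The resolver above applies one uniform rule: a bundle is walked to its start batch, a batch to its start chunk, and the chunk's first block then fixes the fork, so chunk, batch, and bundle proofs all resolve through the same block-level check. A compressed sketch of the walk, with the ORM lookups stubbed out as hypothetical function parameters:

// resolveFirstBlock reduces any task hash to the first L2 block number that
// anchors its hard fork, mirroring the bundle -> batch -> chunk walk above.
func resolveFirstBlock(
	proofType int, hash string,
	bundleStartBatch func(string) (string, error), // bundle hash -> start batch hash
	batchStartChunk func(string) (string, error), // batch hash -> start chunk hash
	chunkStartBlock func(string) (uint64, error), // chunk hash -> start block number
) (uint64, error) {
	const (
		proofTypeBatch  = 2 // assumed enum values for illustration
		proofTypeBundle = 3
	)
	var err error
	if proofType == proofTypeBundle {
		if hash, err = bundleStartBatch(hash); err != nil {
			return 0, err
		}
		proofType = proofTypeBatch
	}
	if proofType == proofTypeBatch {
		if hash, err = batchStartChunk(hash); err != nil {
			return 0, err
		}
	}
	return chunkStartBlock(hash)
}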
Binary file not shown.
Binary file not shown.
11
coordinator/internal/logic/verifier/legacy_vk/upgrade_vks.sh
Executable file
@@ -0,0 +1,11 @@
#!/bin/bash

work_dir="$(dirname -- "${BASH_SOURCE[0]}")"
work_dir="$(cd -- "$work_dir" && pwd)"
echo $work_dir

rm $work_dir/*.vkey

version=release-v0.11.4
wget https://circuit-release.s3.us-west-2.amazonaws.com/${version}/chunk_vk.vkey -O $work_dir/chunk_vk.vkey
wget https://circuit-release.s3.us-west-2.amazonaws.com/${version}/agg_vk.vkey -O $work_dir/agg_vk.vkey
@@ -10,29 +10,13 @@ import (

// NewVerifier Sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
	batchVKMap := map[string]string{
		"shanghai":  "",
		"bernoulli": "",
		"london":    "",
		"istanbul":  "",
		"homestead": "",
		"eip155":    "",
	}
	chunkVKMap := map[string]string{
		"shanghai":  "",
		"bernoulli": "",
		"london":    "",
		"istanbul":  "",
		"homestead": "",
		"eip155":    "",
	}
	batchVKMap[cfg.ForkName] = ""
	chunkVKMap[cfg.ForkName] = ""
	batchVKMap := map[string]string{cfg.ForkName: "mock_vk"}
	chunkVKMap := map[string]string{cfg.ForkName: "mock_vk"}
	return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}

// VerifyChunkProof return a mock verification result for a ChunkProof.
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) {
	if string(proof.Proof) == InvalidTestProof {
		return false, nil
	}
@@ -46,3 +30,11 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string)
	}
	return true, nil
}

// VerifyBundleProof return a mock verification result for a BundleProof.
func (v *Verifier) VerifyBundleProof(proof *message.BundleProof) (bool, error) {
	if string(proof.Proof) == InvalidTestProof {
		return false, nil
	}
	return true, nil
}

@@ -9,7 +9,8 @@ const InvalidTestProof = "this is a invalid proof"

// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
	cfg        *config.VerifierConfig
	ChunkVKMap map[string]string
	BatchVKMap map[string]string
	cfg         *config.VerifierConfig
	ChunkVKMap  map[string]string
	BatchVKMap  map[string]string
	BundleVkMap map[string]string
}

@@ -22,34 +22,18 @@ import (

	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/coordinator/internal/config"

	"scroll-tech/common/types/message"

	"scroll-tech/coordinator/internal/config"
)

// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
	if cfg.MockMode {
		batchVKMap := map[string]string{
			"shanghai":  "",
			"bernoulli": "",
			"london":    "",
			"istanbul":  "",
			"homestead": "",
			"eip155":    "",
		}
		chunkVKMap := map[string]string{
			"shanghai":  "",
			"bernoulli": "",
			"london":    "",
			"istanbul":  "",
			"homestead": "",
			"eip155":    "",
		}

		batchVKMap[cfg.ForkName] = ""
		chunkVKMap[cfg.ForkName] = ""
		return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
		chunkVKMap := map[string]string{cfg.ForkName: "mock_vk"}
		batchVKMap := map[string]string{cfg.ForkName: "mock_vk"}
		bundleVKMap := map[string]string{cfg.ForkName: "mock_vk"}
		return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap, BundleVkMap: bundleVKMap}, nil
	}
	paramsPathStr := C.CString(cfg.ParamsPath)
	assetsPathStr := C.CString(cfg.AssetsPath)
@@ -62,19 +46,25 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
	C.init_chunk_verifier(paramsPathStr, assetsPathStr)

	v := &Verifier{
		cfg:        cfg,
		ChunkVKMap: make(map[string]string),
		BatchVKMap: make(map[string]string),
		cfg:         cfg,
		ChunkVKMap:  make(map[string]string),
		BatchVKMap:  make(map[string]string),
		BundleVkMap: make(map[string]string),
	}

	batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
	bundleVK, err := v.readVK(path.Join(cfg.AssetsPath, "vk_bundle.vkey"))
	if err != nil {
		return nil, err
	}
	chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
	batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "vk_batch.vkey"))
	if err != nil {
		return nil, err
	}
	chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "vk_chunk.vkey"))
	if err != nil {
		return nil, err
	}
	v.BundleVkMap[cfg.ForkName] = bundleVK
	v.BatchVKMap[cfg.ForkName] = batchVK
	v.ChunkVKMap[cfg.ForkName] = chunkVK

@@ -112,7 +102,34 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string)
}

// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) {
	if v.cfg.MockMode {
		log.Info("Mock mode, verifier disabled")
		if string(proof.Proof) == InvalidTestProof {
			return false, nil
		}
		return true, nil

	}
	buf, err := json.Marshal(proof)
	if err != nil {
		return false, err
	}

	log.Info("Start to verify chunk proof", "forkName", forkName)
	proofStr := C.CString(string(buf))
	forkNameStr := C.CString(forkName)
	defer func() {
		C.free(unsafe.Pointer(proofStr))
		C.free(unsafe.Pointer(forkNameStr))
	}()

	verified := C.verify_chunk_proof(proofStr, forkNameStr)
	return verified != 0, nil
}

// VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
func (v *Verifier) VerifyBundleProof(proof *message.BundleProof) (bool, error) {
	if v.cfg.MockMode {
		log.Info("Mock mode, verifier disabled")
		if string(proof.Proof) == InvalidTestProof {
@@ -131,8 +148,8 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
		C.free(unsafe.Pointer(proofStr))
	}()

	log.Info("Start to verify chunk proof ...")
	verified := C.verify_chunk_proof(proofStr)
	log.Info("Start to verify bundle proof ...")
	verified := C.verify_bundle_proof(proofStr)
	return verified != 0, nil
}

@@ -164,7 +181,7 @@ func (v *Verifier) loadEmbedVK() error {
		return err
	}

	v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes)
	v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
	v.BatchVKMap["curie"] = base64.StdEncoding.EncodeToString(batchVKBytes)
	v.ChunkVKMap["curie"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
	return nil
}

@@ -2,7 +2,6 @@ package orm

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"
@@ -13,7 +12,6 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/types"
	"scroll-tech/common/types/message"
	"scroll-tech/common/utils"
)

@@ -59,6 +57,9 @@ type Batch struct {
	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`

	// bundle
	BundleHash string `json:"bundle_hash" gorm:"column:bundle_hash"`

	// metadata
	CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -76,12 +77,12 @@ func (*Batch) TableName() string {
}

// GetUnassignedBatch retrieves unassigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
	var batch Batch
	db := o.db.WithContext(ctx)
	sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
		int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
	sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
		int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady))
	err := db.Raw(sql).Scan(&batch).Error
	if err != nil {
		return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err)
@@ -93,12 +94,12 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChun
}

// GetAssignedBatch retrieves assigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
	var batch Batch
	db := o.db.WithContext(ctx)
	sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
		int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
	sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
		int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady))
	err := db.Raw(sql).Scan(&batch).Error
	if err != nil {
		return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err)
@@ -184,6 +185,63 @@ func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int1
	return batch.ActiveAttempts, batch.TotalAttempts, nil
}

// CheckIfBundleBatchProofsAreReady checks if all proofs for all batches of a given bundleHash are collected.
func (o *Batch) CheckIfBundleBatchProofsAreReady(ctx context.Context, bundleHash string) (bool, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("bundle_hash = ? AND proving_status != ?", bundleHash, types.ProvingTaskVerified)

	var count int64
	if err := db.Count(&count).Error; err != nil {
		return false, fmt.Errorf("Chunk.CheckIfBundleBatchProofsAreReady error: %w, bundle hash: %v", err, bundleHash)
	}
	return count == 0, nil
}

// GetBatchByHash retrieves the given batch.
func (o *Batch) GetBatchByHash(ctx context.Context, hash string) (*Batch, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("hash = ?", hash)

	var batch Batch
	if err := db.First(&batch).Error; err != nil {
		return nil, fmt.Errorf("Batch.GetBatchByHash error: %w, batch hash: %v", err, hash)
	}
	return &batch, nil
}

// GetLatestFinalizedBatch retrieves the latest finalized batch from the database.
func (o *Batch) GetLatestFinalizedBatch(ctx context.Context) (*Batch, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("rollup_status = ?", int(types.RollupFinalized))
	db = db.Order("index desc")

	var latestFinalizedBatch Batch
	if err := db.First(&latestFinalizedBatch).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
	}
	return &latestFinalizedBatch, nil
}

// GetBatchesByBundleHash retrieves the given batch.
func (o *Batch) GetBatchesByBundleHash(ctx context.Context, bundleHash string) ([]*Batch, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("bundle_hash = ?", bundleHash)
	db = db.Order("index ASC")

	var batches []*Batch
	if err := db.Find(&batches).Error; err != nil {
		return nil, fmt.Errorf("Batch.GetBatchesByBundleHash error: %w, bundle hash: %v", err, bundleHash)
	}
	return batches, nil
}

// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
	if batch == nil {
@@ -317,18 +375,14 @@ func (o *Batch) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
}

// UpdateProofAndProvingStatusByHash updates the batch proof and proving status by hash.
func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.BatchProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	proofBytes, err := json.Marshal(proof)
	if err != nil {
		return err
	}

	updateFields := make(map[string]interface{})
	updateFields["proof"] = proofBytes
	updateFields["proof"] = proof
	updateFields["proving_status"] = provingStatus
	updateFields["proof_time_sec"] = proofTimeSec
	updateFields["proved_at"] = utils.NowUTC()

229
coordinator/internal/orm/bundle.go
Normal file
@@ -0,0 +1,229 @@
package orm

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"
	"scroll-tech/common/utils"
)

// Bundle represents a bundle of batches.
type Bundle struct {
	db *gorm.DB `gorm:"column:-"`

	Index           uint64 `json:"index" gorm:"column:index"`
	Hash            string `json:"hash" gorm:"column:hash"`
	StartBatchIndex uint64 `json:"start_batch_index" gorm:"column:start_batch_index"`
	StartBatchHash  string `json:"start_batch_hash" gorm:"column:start_batch_hash"`
	EndBatchIndex   uint64 `json:"end_batch_index" gorm:"column:end_batch_index"`
	EndBatchHash    string `json:"end_batch_hash" gorm:"column:end_batch_hash"`

	// proof
	BatchProofsStatus int16      `json:"batch_proofs_status" gorm:"column:batch_proofs_status;default:1"`
	ProvingStatus     int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof             []byte     `json:"proof" gorm:"column:proof;default:NULL"`
	ProverAssignedAt  *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt          *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	ProofTimeSec      int32      `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
	TotalAttempts     int16      `json:"total_attempts" gorm:"column:total_attempts;default:0"`
	ActiveAttempts    int16      `json:"active_attempts" gorm:"column:active_attempts;default:0"`

	// rollup
	RollupStatus   int16      `json:"rollup_status" gorm:"column:rollup_status;default:1"`
	FinalizeTxHash string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
	FinalizedAt    *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`

	// metadata
	CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewBundle creates a new Bundle database instance.
func NewBundle(db *gorm.DB) *Bundle {
	return &Bundle{db: db}
}

// TableName returns the table name for the Bundle model.
func (*Bundle) TableName() string {
	return "bundle"
}

// GetUnassignedBundle retrieves unassigned bundle based on the specified limit.
// The returned batch sorts in ascending order by their index.
func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
	var bundle Bundle
	db := o.db.WithContext(ctx)
	sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
		int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady))
	err := db.Raw(sql).Scan(&bundle).Error
	if err != nil {
		return nil, fmt.Errorf("Batch.GetUnassignedBundle error: %w", err)
	}
	if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" {
		return nil, nil
	}
	return &bundle, nil
}

// GetAssignedBundle retrieves assigned bundle based on the specified limit.
// The returned bundle sorts in ascending order by their index.
func (o *Bundle) GetAssignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
	var bundle Bundle
	db := o.db.WithContext(ctx)
	sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
		int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady))
	err := db.Raw(sql).Scan(&bundle).Error
	if err != nil {
		return nil, fmt.Errorf("Bundle.GetAssignedBatch error: %w", err)
	}
	if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" {
		return nil, nil
	}
	return &bundle, nil
}

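The two selectors above interpolate only integer constants through fmt.Sprintf, so injection is not a concern; for reference, the same query can be phrased with gorm's placeholder binding. A sketch of the equivalent form, assuming the surrounding Bundle type and types package:

// Equivalent to GetUnassignedBundle via gorm's `?` binding instead of Sprintf.
func getUnassignedBundle(db *gorm.DB, maxActive, maxTotal uint8) (*Bundle, error) {
	var bundle Bundle
	err := db.Raw(
		"SELECT * FROM bundle WHERE proving_status = ? AND total_attempts < ? AND active_attempts < ? AND batch_proofs_status = ? AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
		int(types.ProvingTaskUnassigned), maxTotal, maxActive, int(types.BatchProofsStatusReady),
	).Scan(&bundle).Error
	if err != nil {
		return nil, err
	}
	// Scan leaves the struct zero-valued when no row matches; the original
	// uses the empty start/end batch hashes as that sentinel.
	if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" {
		return nil, nil
	}
	return &bundle, nil
}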
// GetProvingStatusByHash retrieves the proving status of a bundle given its hash.
func (o *Bundle) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Select("proving_status")
	db = db.Where("hash = ?", hash)

	var bundle Bundle
	if err := db.Find(&bundle).Error; err != nil {
		return types.ProvingStatusUndefined, fmt.Errorf("Bundle.GetProvingStatusByHash error: %w, batch hash: %v", err, hash)
	}
	return types.ProvingStatus(bundle.ProvingStatus), nil
}

// GetBundleByHash retrieves the given
func (o *Bundle) GetBundleByHash(ctx context.Context, bundleHash string) (*Bundle, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash = ?", bundleHash)

	var bundle Bundle
	if err := db.First(&bundle).Error; err != nil {
		return nil, fmt.Errorf("Bundle.GetBundleByHash error: %w, bundle hash: %v", err, bundleHash)
	}
	return &bundle, nil
}

// GetUnassignedAndBatchesUnreadyBundles get the bundles which is unassigned and batches are not ready
func (o *Bundle) GetUnassignedAndBatchesUnreadyBundles(ctx context.Context, offset, limit int) ([]*Bundle, error) {
	if offset < 0 || limit < 0 {
		return nil, errors.New("limit and offset must not be smaller than 0")
	}

	db := o.db.WithContext(ctx)
	db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
	db = db.Where("batch_proofs_status = ?", types.BatchProofsStatusPending)
	db = db.Order("index ASC")
	db = db.Offset(offset)
	db = db.Limit(limit)

	var bundles []*Bundle
	if err := db.Find(&bundles).Error; err != nil {
		return nil, fmt.Errorf("Bundle.GetUnassignedAndBatchesUnreadyBundles error: %w", err)
	}
	return bundles, nil
}

// UpdateBatchProofsStatusByBatchHash updates the status of batch_proofs_status field for a given bundle hash.
func (o *Bundle) UpdateBatchProofsStatusByBatchHash(ctx context.Context, bundleHash string, status types.BatchProofsStatus) error {
	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash = ?", bundleHash)

	if err := db.Update("batch_proofs_status", status).Error; err != nil {
		return fmt.Errorf("Bundle.UpdateBatchProofsStatusByBatchHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String())
	}
	return nil
}

// UpdateProvingStatusFailed updates the proving status failed of a bundle.
func (o *Bundle) UpdateProvingStatusFailed(ctx context.Context, bundleHash string, maxAttempts uint8, dbTX ...*gorm.DB) error {
	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash", bundleHash)
	db = db.Where("total_attempts >= ?", maxAttempts)
	db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
	if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
		return fmt.Errorf("Bundle.UpdateProvingStatus error: %w, bundle hash: %v, status: %v", err, bundleHash, types.ProvingTaskFailed.String())
	}
	return nil
}

// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash.
func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}

	updateFields := make(map[string]interface{})
	updateFields["proof"] = proof
	updateFields["proving_status"] = provingStatus
	updateFields["proof_time_sec"] = proofTimeSec
	updateFields["proved_at"] = utils.NowUTC()

	db = db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash", hash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
	}
	return nil
}

// UpdateBundleAttempts atomically increments the attempts count for the earliest available bundle that meets the conditions.
func (o *Bundle) UpdateBundleAttempts(ctx context.Context, hash string, curActiveAttempts, curTotalAttempts int16) (int64, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash = ?", hash)
	db = db.Where("active_attempts = ?", curActiveAttempts)
	db = db.Where("total_attempts = ?", curTotalAttempts)
	result := db.Updates(map[string]interface{}{
		"proving_status":  types.ProvingTaskAssigned,
		"total_attempts":  gorm.Expr("total_attempts + 1"),
		"active_attempts": gorm.Expr("active_attempts + 1"),
	})

	if result.Error != nil {
		return 0, fmt.Errorf("failed to update bundle, err:%w", result.Error)
	}
	return result.RowsAffected, nil
}

// DecreaseActiveAttemptsByHash decrements the active_attempts of a bundle given its hash.
|
||||
func (o *Bundle) DecreaseActiveAttemptsByHash(ctx context.Context, bundleHash string, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&Bundle{})
|
||||
db = db.Where("hash = ?", bundleHash)
|
||||
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
|
||||
db = db.Where("active_attempts > ?", 0)
|
||||
result := db.UpdateColumn("active_attempts", gorm.Expr("active_attempts - 1"))
|
||||
if result.Error != nil {
|
||||
return fmt.Errorf("Bundle.DecreaseActiveAttemptsByHash error: %w, bundle hash: %v", result.Error, bundleHash)
|
||||
}
|
||||
if result.RowsAffected == 0 {
|
||||
log.Warn("No rows were affected in DecreaseActiveAttemptsByHash", "bundle hash", bundleHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
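Taken together, these helpers form an optimistic-concurrency scheme around task assignment: UpdateBundleAttempts only bumps the counters when the caller's previously read values still match, and DecreaseActiveAttemptsByHash releases a slot once the prover task concludes. A minimal sketch of the intended call pattern follows; it assumes Go struct fields ActiveAttempts and TotalAttempts mirroring the columns above (the field and variable names are assumptions, not coordinator code):

// Sketch of the assign/release cycle, not repo code.
func tryAssign(ctx context.Context, bundleOrm *Bundle, candidate *Bundle) (bool, error) {
    // Guarded increment: succeeds only if no concurrent assigner raced us.
    rows, err := bundleOrm.UpdateBundleAttempts(ctx, candidate.Hash, candidate.ActiveAttempts, candidate.TotalAttempts)
    if err != nil {
        return false, err
    }
    if rows == 0 {
        return false, nil // counters moved under us; re-read and retry
    }
    return true, nil
}

// When the prover task concludes (proof submitted, failed, or timed out),
// free the slot: _ = bundleOrm.DecreaseActiveAttemptsByHash(ctx, candidate.Hash)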
@@ -74,11 +74,11 @@ func (*Chunk) TableName() string {

// GetUnassignedChunk retrieves an unassigned chunk based on the specified attempt limits.
// The returned chunk is the one with the smallest index.
func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
    var chunk Chunk
    db := o.db.WithContext(ctx)
    sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
        int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
    sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
        int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts)
    err := db.Raw(sql).Scan(&chunk).Error
    if err != nil {
        return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
@@ -91,11 +91,11 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum

// GetAssignedChunk retrieves an assigned chunk based on the specified attempt limits.
// The returned chunk is the one with the smallest index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
    var chunk Chunk
    db := o.db.WithContext(ctx)
    sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
        int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
    sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
        int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts)
    err := db.Raw(sql).Scan(&chunk).Error
    if err != nil {
        return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
@@ -340,18 +340,14 @@ func (o *Chunk) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
}

// UpdateProofAndProvingStatusByHash updates the chunk proof and proving_status by hash.
func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.ChunkProof, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
    db := o.db
    if len(dbTX) > 0 && dbTX[0] != nil {
        db = dbTX[0]
    }
    proofBytes, err := json.Marshal(proof)
    if err != nil {
        return err
    }

    updateFields := make(map[string]interface{})
    updateFields["proof"] = proofBytes
    updateFields["proof"] = proof
    updateFields["proving_status"] = int(status)
    updateFields["proof_time_sec"] = proofTimeSec
    updateFields["proved_at"] = utils.NowUTC()

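With the proof parameter now a raw []byte on both the bundle and chunk ORM methods, JSON serialization moves out to the caller. A hedged sketch of the resulting call site, mirroring how the updated tests below invoke it (the variable names are illustrative):

// Caller-side marshaling, as the tests in this diff do:
encodeData, err := json.Marshal(chunkProof) // chunkProof: *message.ChunkProof
if err != nil {
    return err
}
if err := chunkOrm.UpdateProofAndProvingStatusByHash(ctx, chunkHash, encodeData, types.ProvingTaskUnassigned, proofTimeSec); err != nil {
    return err
}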
@@ -74,6 +74,19 @@ func (o *L2Block) GetL2BlockHashesByChunkHash(ctx context.Context, chunkHash str
    return blockHashes, nil
}

// GetL2BlockByNumber retrieves the L2 block with the given block number.
func (o *L2Block) GetL2BlockByNumber(ctx context.Context, blockNumber uint64) (*L2Block, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&L2Block{})
    db = db.Where("number = ?", blockNumber)

    var l2Block L2Block
    if err := db.First(&l2Block).Error; err != nil {
        return nil, fmt.Errorf("L2Block.GetL2BlockByNumber error: %w, block number: %v", err, blockNumber)
    }
    return &l2Block, nil
}

// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
// for unit test
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block) error {

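A minimal usage sketch for the new accessor, assuming an initialized *L2Block ORM handle (the variable name is illustrative):

l2Block, err := l2BlockOrm.GetL2BlockByNumber(ctx, 42)
if err != nil {
    // db.First wraps gorm.ErrRecordNotFound into the returned error when no row matches.
    return fmt.Errorf("fetch L2 block 42: %w", err)
}
_ = l2Block // use the fetched row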
@@ -1,6 +1,15 @@
package types

import "time"
import (
    "crypto/ecdsa"
    "encoding/hex"
    "time"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/rlp"
)

const (
    // PublicKey the public key for context
@@ -9,26 +18,88 @@ const (
    ProverName = "prover_name"
    // ProverVersion the prover version for context
    ProverVersion = "prover_version"
    // HardForkName the fork name for context
    HardForkName = "hard_fork_name"
)

// Message the login message struct
type Message struct {
    Challenge     string `form:"challenge" json:"challenge" binding:"required"`
    ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
    ProverName    string `form:"prover_name" json:"prover_name" binding:"required"`
    HardForkName  string `form:"hard_fork_name" json:"hard_fork_name"`
}

// LoginParameter for /login api
type LoginParameter struct {
    Message   Message `form:"message" json:"message" binding:"required"`
    Signature string  `form:"signature" json:"signature" binding:"required"`
}

// LoginSchema for /login response
type LoginSchema struct {
    Time  time.Time `json:"time"`
    Token string    `json:"token"`
}

// Message the login message struct
type Message struct {
    Challenge     string       `form:"challenge" json:"challenge" binding:"required"`
    ProverVersion string       `form:"prover_version" json:"prover_version" binding:"required"`
    ProverName    string       `form:"prover_name" json:"prover_name" binding:"required"`
    ProverTypes   []ProverType `form:"prover_types" json:"prover_types"`
    VKs           []string     `form:"vks" json:"vks"`
}

// LoginParameter for /login api
type LoginParameter struct {
    Message   Message `form:"message" json:"message" binding:"required"`
    PublicKey string  `form:"public_key" json:"public_key"`
    Signature string  `form:"signature" json:"signature" binding:"required"`
}

// SignWithKey signs the login message with the given private key and fills in the Signature field.
func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {
    // Hash identity content
    hash, err := a.Message.Hash()
    if err != nil {
        return err
    }

    // Sign register message
    sig, err := crypto.Sign(hash, priv)
    if err != nil {
        return err
    }

    a.Signature = hexutil.Encode(sig)
    return nil
}

// Verify verifies the signature of the auth message.
func (a *LoginParameter) Verify() (bool, error) {
    hash, err := a.Message.Hash()
    if err != nil {
        return false, err
    }

    expectedPubKey, err := a.Message.DecodeAndUnmarshalPubkey(a.PublicKey)
    if err != nil {
        return false, err
    }

    sig := common.FromHex(a.Signature)
    isValid := crypto.VerifySignature(crypto.CompressPubkey(expectedPubKey), hash, sig[:len(sig)-1])
    return isValid, nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Message) Hash() ([]byte, error) {
    byt, err := rlp.EncodeToBytes(i)
    if err != nil {
        return nil, err
    }
    hash := crypto.Keccak256Hash(byt)
    return hash[:], nil
}

// DecodeAndUnmarshalPubkey decodes a hex-encoded compressed public key and unmarshals it into an ecdsa.PublicKey.
func (i *Message) DecodeAndUnmarshalPubkey(pubKeyHex string) (*ecdsa.PublicKey, error) {
    // Decode hex string to bytes
    byteKey, err := hex.DecodeString(pubKeyHex)
    if err != nil {
        return nil, err
    }

    // Unmarshal bytes to ECDSA public key
    pubKey, err := crypto.DecompressPubkey(byteKey)
    if err != nil {
        return nil, err
    }
    return pubKey, nil
}

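One subtlety worth calling out in Verify: crypto.Sign returns a 65-byte [R || S || V] signature, while crypto.VerifySignature expects only the 64-byte [R || S] part, which is why the last byte (the recovery id) is sliced off. A small standalone illustration, not repo code (errors elided for brevity):

package main

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
    key, _ := crypto.GenerateKey()
    digest := crypto.Keccak256([]byte("login message"))

    sig, _ := crypto.Sign(digest, key) // 65 bytes: R || S || V
    fmt.Println(len(sig))              // 65

    compressed := crypto.CompressPubkey(&key.PublicKey)
    ok := crypto.VerifySignature(compressed, digest, sig[:64]) // drop recovery id V
    fmt.Println(ok) // true
}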
78
coordinator/internal/types/auth_test.go
Normal file
@@ -0,0 +1,78 @@
package types

import (
    "encoding/hex"
    "testing"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/stretchr/testify/assert"
)

func TestAuthMessageSignAndVerify(t *testing.T) {
    privateKey, err := crypto.GenerateKey()
    assert.NoError(t, err)
    publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&privateKey.PublicKey))

    var authMsg LoginParameter
    t.Run("sign", func(t *testing.T) {
        authMsg = LoginParameter{
            Message: Message{
                ProverName:    "test1",
                ProverVersion: "v0.0.1",
                Challenge:     "abcdef",
                ProverTypes:   []ProverType{ProverTypeBatch},
                VKs:           []string{"vk1", "vk2"},
            },
            PublicKey: publicKeyHex,
        }

        err = authMsg.SignWithKey(privateKey)
        assert.NoError(t, err)
    })

    t.Run("valid verify", func(t *testing.T) {
        ok, verifyErr := authMsg.Verify()
        assert.True(t, ok)
        assert.NoError(t, verifyErr)
    })

    t.Run("invalid verify", func(t *testing.T) {
        authMsg.Message.Challenge = "abcdefgh"
        ok, verifyErr := authMsg.Verify()
        assert.False(t, ok)
        assert.NoError(t, verifyErr)
    })
}

// TestGenerateSignature isn't a real unit test; it just generates a signature for manual testing.
func TestGenerateSignature(t *testing.T) {
    privateKeyHex := "8b8df68fddf7ee2724b79ccbd07799909d59b4dd4f4df3f6ecdc4fb8d56bdf4c"
    privateKeyBytes, err := hex.DecodeString(privateKeyHex)
    assert.Nil(t, err)
    privateKey, err := crypto.ToECDSA(privateKeyBytes)
    assert.NoError(t, err)
    publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&privateKey.PublicKey))

    t.Log("publicKey: ", publicKeyHex)

    authMsg := LoginParameter{
        Message: Message{
            ProverName:    "test",
            ProverVersion: "v4.4.32-37af5ef5-38a68e2-1c5093c",
            Challenge:     "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjEzMjc5MTIsIm9yaWdfaWF0IjoxNzIxMzI0MzEyLCJyYW5kb20iOiJWMVFlT19yNEV5eGRmYUtDalprVExEa0ZIemEyNTdQRG93dTV4SnVxYTdZPSJ9.x-B_TnkTUvs8-hiMfJXejxetAP6rXfeRUmyZ3S0uBiM",
            ProverTypes:   []ProverType{ProverTypeBatch},
            VKs: []string{"AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD9jfGkei+f0wNYpkjW7JO12EfU7CjYVBo+PGku3zaQJI64lbn6BwyTBa4RfrPFpV5mP47ix0sXZ+Wt5wklMLRW7OIJb1yfCDm+gkSsp3/Zqrxt4SY4rQ4WtHfynTCQ0KDi78jNuiFvwxO3ub3DkgGVaxMkGxTRP/Vz6E7MCZMUBR5wZFcMzJn+73f0wYjDxfj00krg9O1VrwVxbVV1ycLR6oQLcOgm/l+xwth8io0vDpF9OY21gD5DgJn9GgcYe8KoRVEbEqApLZPdBibpcSMTY9czZI2LnFcqrDDmYvhEwgjhZrsTog2xLXOODoOupZ/is5ekQ9Gi0y871b1mLlCGA=",
                "AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD1DEjW4Kell67H07wazT5DdzrSh4+amh+cmosQHp9p9snFypyoBGt3UHtoJGQBZlywZWDS9ht5pnaEoGBdaKcQk+lFb+WxTiId0KOAa0mafTZTQw8yToy57Jple64qzlRu1dux30tZZGuerLN1CKzg5Xl2iOpMK+l87jCINwVp5cUtF/XrvhBbU7onKh3KBiy99iUqVyA3Y6iiIZhGKWBSuSA4bNgDYIoVkqjHpdL35aEShoRO6pNXt7rDzxFoPzH0JuPI54nE4OhVrzZXwtkAEosxVa/fszcE092FH+HhhtxZBYe/KEzwdISU9TOPdId3UF/UMYC0MiYOlqffVTgAg="},
        },
        PublicKey: publicKeyHex,
    }
    err = authMsg.SignWithKey(privateKey)
    assert.NoError(t, err)
    t.Log("signature: ", authMsg.Signature)

    verify, err := authMsg.Verify()
    assert.NoError(t, err)
    assert.True(t, verify)
}
@@ -2,10 +2,9 @@ package types

// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
    ProverHeight uint64   `form:"prover_height" json:"prover_height"`
    TaskType     int      `form:"task_type" json:"task_type"`
    VK           string   `form:"vk" json:"vk"`   // will be deprecated after all go_prover offline
    VKs          []string `form:"vks" json:"vks"` // for rust_prover that supports multi-circuits
    ProverHeight uint64 `form:"prover_height" json:"prover_height"`
    TaskType     int    `form:"task_type" json:"task_type"`
    TaskTypes    []int  `form:"task_types" json:"task_types"`
}

// GetTaskSchema the schema data returned to the prover for a get-task request

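Since the vk/vks fields are gone, a prover's get-task request now carries only its height plus the task types it can serve. A small illustration of the wire shape (standalone sketch; the values are made up):

param := GetTaskParameter{
    ProverHeight: 1234567,
    TaskTypes:    []int{int(message.ProofTypeChunk)},
}
body, _ := json.Marshal(param)
// body is {"prover_height":1234567,"task_type":0,"task_types":[N]}
// where N is the numeric value of message.ProofTypeChunk.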
42
coordinator/internal/types/prover.go
Normal file
@@ -0,0 +1,42 @@
package types

import (
    "fmt"

    "scroll-tech/common/types/message"
)

// ProverType represents the type of prover.
type ProverType uint8

func (r ProverType) String() string {
    switch r {
    case ProverTypeChunk:
        return "prover type chunk"
    case ProverTypeBatch:
        return "prover type batch"
    default:
        return fmt.Sprintf("illegal prover type: %d", r)
    }
}

const (
    // ProverTypeUndefined is an unknown prover type
    ProverTypeUndefined ProverType = iota
    // ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks
    ProverTypeChunk
    // ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks
    ProverTypeBatch
)

// MakeProverType makes a ProverType from a message.ProofType.
func MakeProverType(proofType message.ProofType) ProverType {
    switch proofType {
    case message.ProofTypeChunk:
        return ProverTypeChunk
    case message.ProofTypeBatch, message.ProofTypeBundle:
        return ProverTypeBatch
    default:
        return ProverTypeUndefined
    }
}
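MakeProverType is how a requested proof type is folded into the coarser login-time capability; note that batch and bundle proofs map to the same prover class. A small sketch of the mapping, mirroring how the mock prover later in this diff calls it (the import alias is an assumption; these are module-internal packages):

package main

import (
    "fmt"

    "scroll-tech/common/types/message"
    ctypes "scroll-tech/coordinator/internal/types"
)

func main() {
    fmt.Println(ctypes.MakeProverType(message.ProofTypeChunk))  // prover type chunk
    fmt.Println(ctypes.MakeProverType(message.ProofTypeBatch))  // prover type batch
    fmt.Println(ctypes.MakeProverType(message.ProofTypeBundle)) // prover type batch
}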
@@ -2,13 +2,11 @@ package types

// SubmitProofParameter the SubmitProof api request parameter
type SubmitProofParameter struct {
    // TODO: once all provers have upgraded, change this field to required
    UUID         string `form:"uuid" json:"uuid"`
    TaskID       string `form:"task_id" json:"task_id" binding:"required"`
    TaskType     int    `form:"task_type" json:"task_type" binding:"required"`
    Status       int    `form:"status" json:"status"`
    Proof        string `form:"proof" json:"proof"`
    FailureType  int    `form:"failure_type" json:"failure_type"`
    FailureMsg   string `form:"failure_msg" json:"failure_msg"`
    HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
    UUID        string `form:"uuid" json:"uuid"`
    TaskID      string `form:"task_id" json:"task_id" binding:"required"`
    TaskType    int    `form:"task_type" json:"task_type" binding:"required"`
    Status      int    `form:"status" json:"status"`
    Proof       string `form:"proof" json:"proof"`
    FailureType int    `form:"failure_type" json:"failure_type"`
    FailureMsg  string `form:"failure_msg" json:"failure_msg"`
}

@@ -34,10 +34,9 @@ import (
)

const (
    forkNumberFour  = 4
    forkNumberThree = 3
    forkNumberTwo   = 2
    forkNumberOne   = 1
    forkNumberTwo    = 2
    forkNumberOne    = 1
    minProverVersion = "v2.0.0"
)

var (
@@ -52,17 +51,10 @@ var (
    proverTaskOrm      *orm.ProverTask
    proverBlockListOrm *orm.ProverBlockList

    block1 *encoding.Block
    block2 *encoding.Block

    chunk          *encoding.Chunk
    hardForkChunk1 *encoding.Chunk
    hardForkChunk2 *encoding.Chunk

    batch          *encoding.Batch
    hardForkBatch1 *encoding.Batch
    hardForkBatch2 *encoding.Batch

    block1       *encoding.Block
    block2       *encoding.Block
    chunk        *encoding.Chunk
    batch        *encoding.Batch
    tokenTimeout int
)

@@ -89,7 +81,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))

    tokenTimeout = 6
    tokenTimeout = 60
    conf = &config.Config{
        L2: &config.L2{
            ChainID: 111,
@@ -99,11 +91,12 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
        Verifier: &config.VerifierConfig{
            MockMode: true,
        },
        BatchCollectionTimeSec: 10,
        ChunkCollectionTimeSec: 10,
        MaxVerifierWorkers:     10,
        SessionAttempts:        5,
        MinProverVersion:       version.Version,
        BatchCollectionTimeSec:  10,
        ChunkCollectionTimeSec:  10,
        BundleCollectionTimeSec: 10,
        MaxVerifierWorkers:      10,
        SessionAttempts:         5,
        MinProverVersion:        minProverVersion,
    },
    Auth: &config.Auth{
        ChallengeExpireDurationSec: tokenTimeout,
@@ -152,7 +145,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) {
    var err error

    version.Version = "v4.1.98"
    version.Version = "v4.2.0"

    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
    glogger.Verbosity(log.LvlInfo)
@@ -186,14 +179,9 @@ func setEnv(t *testing.T) {
    assert.NoError(t, err)

    chunk = &encoding.Chunk{Blocks: []*encoding.Block{block1, block2}}
    hardForkChunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
    hardForkChunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}

    assert.NoError(t, err)

    batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}
    hardForkBatch1 = &encoding.Batch{Index: 1, Chunks: []*encoding.Chunk{hardForkChunk1}}
    hardForkBatch2 = &encoding.Batch{Index: 2, Chunks: []*encoding.Chunk{hardForkChunk2}}
}

func TestApis(t *testing.T) {
@@ -208,7 +196,6 @@ func TestApis(t *testing.T) {
    t.Run("TestInvalidProof", testInvalidProof)
    t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
    t.Run("TestTimeoutProof", testTimeoutProof)
    t.Run("TestHardFork", testHardForkAssignTask)
}

func testHandshake(t *testing.T) {
@@ -261,12 +248,12 @@ func testGetTaskBlocked(t *testing.T) {
    assert.NoError(t, err)

    expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
    code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
    code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
    assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
    assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

    expectedErr = fmt.Errorf("get empty prover task")
    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
    assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
    assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

@@ -277,12 +264,12 @@ func testGetTaskBlocked(t *testing.T) {
    assert.NoError(t, err)

    expectedErr = fmt.Errorf("get empty prover task")
    code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
    code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
    assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
    assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

    expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
    assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
    assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -301,249 +288,17 @@ func testOutdatedProverVersion(t *testing.T) {
    batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
    assert.True(t, chunkProver.healthCheckSuccess(t))

    expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
    code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
    assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
    expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", minProverVersion, chunkProver.proverVersion)
    code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
    assert.Equal(t, types.ErrJWTCommonErr, code)
    assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

    expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
    assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
    expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", minProverVersion, batchProver.proverVersion)
    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
    assert.Equal(t, types.ErrJWTCommonErr, code)
    assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}

func testHardForkAssignTask(t *testing.T) {
    tests := []struct {
        name                  string
        proofType             message.ProofType
        forkNumbers           map[string]int64
        proverForkNames       []string
        exceptTaskNumber      int
        exceptGetTaskErrCodes []int
        exceptGetTaskErrMsgs  []string
    }{
        { // hard fork 4, prover 4 block [2-3]
            name:                  "noTaskForkChunkProverVersionLargeOrEqualThanHardFork",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"bernoulli": forkNumberFour},
            exceptTaskNumber:      0,
            proverForkNames:       []string{"bernoulli", "bernoulli"},
            exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"get empty prover task", "get empty prover task"},
        },
        {
            name:                  "noTaskForkBatchProverVersionLargeOrEqualThanHardFork",
            proofType:             message.ProofTypeBatch,
            forkNumbers:           map[string]int64{"bernoulli": forkNumberFour},
            exceptTaskNumber:      0,
            proverForkNames:       []string{"bernoulli", "bernoulli"},
            exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"get empty prover task", "get empty prover task"},
        },
        { // hard fork 1, prover 1 block [2-3]
            name:                  "noTaskForkChunkProverVersionLessThanHardFork",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne},
            exceptTaskNumber:      0,
            proverForkNames:       []string{"homestead", "homestead"},
            exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"get empty prover task", "get empty prover task"},
        },
        {
            name:                  "noTaskForkBatchProverVersionLessThanHardFork",
            proofType:             message.ProofTypeBatch,
            forkNumbers:           map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne},
            exceptTaskNumber:      0,
            proverForkNames:       []string{"homestead", "homestead"},
            exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"get empty prover task", "get empty prover task"},
        },
        {
            name:                  "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
            proofType:             message.ProofTypeBatch,
            forkNumbers:           map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
            exceptTaskNumber:      0,
            proverForkNames:       []string{"", ""},
            exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"get empty prover task", "get empty prover task"},
        },
        { // hard fork 3, prover 3 block [2-3]
            name:                  "oneTaskForkChunkProverVersionLargeOrEqualThanHardFork",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"london": forkNumberThree},
            exceptTaskNumber:      1,
            proverForkNames:       []string{"london", "london"},
            exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"", "get empty prover task"},
        },
        {
            name:                  "oneTaskForkBatchProverVersionLargeOrEqualThanHardFork",
            proofType:             message.ProofTypeBatch,
            forkNumbers:           map[string]int64{"london": forkNumberThree},
            exceptTaskNumber:      1,
            proverForkNames:       []string{"london", "london"},
            exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"", "get empty prover task"},
        },
        { // hard fork 2, prover 2 block [2-3]
            name:                  "oneTaskForkChunkProverVersionLessThanHardFork",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
            exceptTaskNumber:      1,
            proverForkNames:       []string{"istanbul", "istanbul"},
            exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"", "get empty prover task"},
        },
        {
            name:                  "oneTaskForkBatchProverVersionLessThanHardFork",
            proofType:             message.ProofTypeBatch,
            forkNumbers:           map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
            exceptTaskNumber:      1,
            proverForkNames:       []string{"istanbul", "istanbul"},
            exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"", "get empty prover task"},
        },
        { // hard fork 2, prover 2 block [2-3]
            name:                  "twoTaskForkChunkProverVersionLargeOrEqualThanHardFork",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"istanbul": forkNumberTwo},
            exceptTaskNumber:      2,
            proverForkNames:       []string{"istanbul", "istanbul"},
            exceptGetTaskErrCodes: []int{types.Success, types.Success},
            exceptGetTaskErrMsgs:  []string{"", ""},
        },
        {
            name:                  "twoTaskForkBatchProverVersionLargeOrEqualThanHardFork",
            proofType:             message.ProofTypeBatch,
            forkNumbers:           map[string]int64{"istanbul": forkNumberTwo},
            exceptTaskNumber:      2,
            proverForkNames:       []string{"istanbul", "istanbul"},
            exceptGetTaskErrCodes: []int{types.Success, types.Success},
            exceptGetTaskErrMsgs:  []string{"", ""},
        },
        { // hard fork 4, prover 3 block [2-3]
            name:                  "twoTaskForkChunkProverVersionLessThanHardFork",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"bernoulli": forkNumberFour, "istanbul": forkNumberTwo},
            exceptTaskNumber:      2,
            proverForkNames:       []string{"istanbul", "istanbul"},
            exceptGetTaskErrCodes: []int{types.Success, types.Success},
            exceptGetTaskErrMsgs:  []string{"", ""},
        },
        { // hard fork 3, prover1:2 prover2:3 block [2-3]
            name:                  "twoTaskForkChunkProverVersionMiddleHardFork",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
            exceptTaskNumber:      2,
            proverForkNames:       []string{"istanbul", "london"},
            exceptGetTaskErrCodes: []int{types.Success, types.Success},
            exceptGetTaskErrMsgs:  []string{"", ""},
        },
        {
            name:                  "twoTaskForkBatchProverVersionMiddleHardFork",
            proofType:             message.ProofTypeBatch,
            forkNumbers:           map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
            exceptTaskNumber:      2,
            proverForkNames:       []string{"istanbul", "london"},
            exceptGetTaskErrCodes: []int{types.Success, types.Success},
            exceptGetTaskErrMsgs:  []string{"", ""},
        },
        { // hard fork 3, prover1:2 prover2:3 block [2-3]
            name:                  "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
            exceptTaskNumber:      2,
            proverForkNames:       []string{"", "london"},
            exceptGetTaskErrCodes: []int{types.Success, types.Success},
            exceptGetTaskErrMsgs:  []string{"", ""},
        },
        {
            name:                  "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
            proofType:             message.ProofTypeBatch,
            forkNumbers:           map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
            exceptTaskNumber:      2,
            proverForkNames:       []string{"", "london"},
            exceptGetTaskErrCodes: []int{types.Success, types.Success},
            exceptGetTaskErrMsgs:  []string{"", ""},
        },
        { // hard fork 2, prover 2 block [2-3]
            name:                  "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
            proofType:             message.ProofTypeChunk,
            forkNumbers:           map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
            exceptTaskNumber:      1,
            proverForkNames:       []string{"", ""},
            exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
            exceptGetTaskErrMsgs:  []string{"", "get empty prover task"},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            coordinatorURL := randomURL()
            collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, tt.forkNumbers)
            defer func() {
                collector.Stop()
                assert.NoError(t, httpHandler.Shutdown(context.Background()))
            }()

            chunkProof := &message.ChunkProof{
                StorageTrace: []byte("testStorageTrace"),
                Protocol:     []byte("testProtocol"),
                Proof:        []byte("testProof"),
                Instances:    []byte("testInstance"),
                Vk:           []byte("testVk"),
                ChunkInfo:    nil,
            }

            // the insert block number is 2 and 3
            // chunk1 batch1 contains block number 2
            // chunk2 batch2 contains block number 3
            err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
            assert.NoError(t, err)

            dbHardForkChunk1, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk1)
            assert.NoError(t, err)
            err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 2, dbHardForkChunk1.Hash)
            assert.NoError(t, err)
            err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk1.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
            assert.NoError(t, err)
            dbHardForkBatch1, err := batchOrm.InsertBatch(context.Background(), hardForkBatch1)
            assert.NoError(t, err)
            err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, dbHardForkBatch1.Hash)
            assert.NoError(t, err)
            err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch1.Hash, types.ChunkProofsStatusReady)
            assert.NoError(t, err)

            dbHardForkChunk2, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk2)
            assert.NoError(t, err)
            err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 3, 100, dbHardForkChunk2.Hash)
            assert.NoError(t, err)
            err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk2.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
            assert.NoError(t, err)
            dbHardForkBatch2, err := batchOrm.InsertBatch(context.Background(), hardForkBatch2)
            assert.NoError(t, err)
            err = chunkOrm.UpdateBatchHashInRange(context.Background(), 1, 1, dbHardForkBatch2.Hash)
            assert.NoError(t, err)
            err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch2.Hash, types.ChunkProofsStatusReady)
            assert.NoError(t, err)

            getTaskNumber := 0
            for i := 0; i < 2; i++ {
                mockProver := newMockProver(t, fmt.Sprintf("mock_prover_%d", i), coordinatorURL, tt.proofType, version.Version)
                proverTask, errCode, errMsg := mockProver.getProverTask(t, tt.proofType, tt.proverForkNames[i])
                assert.Equal(t, tt.exceptGetTaskErrCodes[i], errCode)
                assert.Equal(t, tt.exceptGetTaskErrMsgs[i], errMsg)
                if errCode != types.Success {
                    continue
                }
                getTaskNumber++
                mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
            }
            assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
        })
    }
}

func testValidProof(t *testing.T) {
    coordinatorURL := randomURL()
    collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
@@ -575,12 +330,12 @@ func testValidProof(t *testing.T) {

        provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)

        proofStatus := verifiedSuccess
        proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
        assert.Equal(t, errCode, types.Success)
        assert.Equal(t, errMsg, "")
        exceptProofStatus := verifiedSuccess
        proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType)
        assert.Equal(t, types.Success, errCode)
        assert.Equal(t, "", errMsg)
        assert.NotNil(t, proverTask)
        provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
        provers[i].submitProof(t, proverTask, exceptProofStatus, types.Success)
    }

    // verify proof status
@@ -641,39 +396,69 @@ func testInvalidProof(t *testing.T) {
    assert.NoError(t, err)
    err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
    assert.NoError(t, err)
    batch, err := batchOrm.InsertBatch(context.Background(), batch)
    dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
    assert.NoError(t, err)
    err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
    err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, dbBatch.Hash)
    assert.NoError(t, err)
    err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbBatch.Hash, types.ChunkProofsStatusReady)
    assert.NoError(t, err)

    proofType := message.ProofTypeBatch
    provingStatus := verifiedFailed
    expectErrCode := types.ErrCoordinatorHandleZkProofFailure
    prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
    proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
    assert.NotNil(t, proverTask)
    assert.Equal(t, errCode, types.Success)
    assert.Equal(t, errMsg, "")
    prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")
    // create mock provers.
    provers := make([]*mockProver, 2)
    for i := 0; i < len(provers); i++ {
        var (
            proofType     message.ProofType
            provingStatus proofStatus
            exceptCode    int
        )

        if i%2 == 0 {
            proofType = message.ProofTypeChunk
            provingStatus = verifiedSuccess
            exceptCode = types.Success
        } else {
            proofType = message.ProofTypeBatch
            provingStatus = verifiedFailed
            exceptCode = types.ErrCoordinatorHandleZkProofFailure
        }

        provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
        proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType)
        assert.Equal(t, types.Success, errCode)
        assert.Equal(t, "", errMsg)
        assert.NotNil(t, proverTask)
        provers[i].submitProof(t, proverTask, provingStatus, exceptCode)
    }

    // verify proof status
    var (
        tick                = time.Tick(1500 * time.Millisecond)
        tickStop            = time.Tick(time.Minute)
        chunkProofStatus    types.ProvingStatus
        batchProofStatus    types.ProvingStatus
        batchActiveAttempts int16
        batchMaxAttempts    int16
        chunkActiveAttempts int16
        chunkMaxAttempts    int16
    )

    for {
        select {
        case <-tick:
            batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
            chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
            assert.NoError(t, err)
            if batchProofStatus == types.ProvingTaskAssigned {
            batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), dbBatch.Hash)
            assert.NoError(t, err)
            if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskAssigned {
                return
            }
            batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)

            chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
            assert.NoError(t, err)
            assert.Equal(t, 1, int(chunkMaxAttempts))
            assert.Equal(t, 0, int(chunkActiveAttempts))

            batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), dbBatch.Hash)
            assert.NoError(t, err)
            assert.Equal(t, 1, int(batchMaxAttempts))
            assert.Equal(t, 0, int(batchActiveAttempts))
@@ -699,26 +484,38 @@ func testProofGeneratedFailed(t *testing.T) {
    assert.NoError(t, err)
    err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
    assert.NoError(t, err)
    batch, err := batchOrm.InsertBatch(context.Background(), batch)
    dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
    assert.NoError(t, err)
    err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
    err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, dbBatch.Hash)
    assert.NoError(t, err)
    err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbBatch.Hash, types.ChunkProofsStatusReady)
    assert.NoError(t, err)

    // create mock provers.
    provers := make([]*mockProver, 2)
    for i := 0; i < len(provers); i++ {
        var proofType message.ProofType
        var (
            proofType    message.ProofType
            exceptCode   int
            exceptErrMsg string
        )
        if i%2 == 0 {
            proofType = message.ProofTypeChunk
            exceptCode = types.Success
            exceptErrMsg = ""
        } else {
            proofType = message.ProofTypeBatch
            exceptCode = types.ErrCoordinatorGetTaskFailure
            exceptErrMsg = "return prover task err:coordinator internal error"
        }
        provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
        proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
        proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType)
        assert.NotNil(t, proverTask)
        assert.Equal(t, errCode, types.Success)
        assert.Equal(t, errMsg, "")
        provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
        assert.Equal(t, errCode, exceptCode)
        assert.Equal(t, errMsg, exceptErrMsg)
        if errCode == types.Success {
            provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
        }
    }

    // verify proof status
@@ -743,7 +540,7 @@ func testProofGeneratedFailed(t *testing.T) {
        case <-tick:
            chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
            assert.NoError(t, err)
            batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
            batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), dbBatch.Hash)
            assert.NoError(t, err)
            if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
                return
@@ -754,14 +551,14 @@ func testProofGeneratedFailed(t *testing.T) {
            assert.Equal(t, 1, int(chunkMaxAttempts))
            assert.Equal(t, 0, int(chunkActiveAttempts))

            batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
            batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), dbBatch.Hash)
            assert.NoError(t, err)
            assert.Equal(t, 1, int(batchMaxAttempts))
            assert.Equal(t, 0, int(batchActiveAttempts))

            chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeChunk, dbChunk.Hash)
            assert.NoError(t, err)
            batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, batch.Hash)
            batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, dbBatch.Hash)
            assert.NoError(t, err)
            if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid {
                return
@@ -797,18 +594,25 @@ func testTimeoutProof(t *testing.T) {
    assert.NoError(t, err)
    batch, err := batchOrm.InsertBatch(context.Background(), batch)
    assert.NoError(t, err)
    err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
    assert.NoError(t, err)
    encodeData, err := json.Marshal(message.ChunkProof{})
    assert.NoError(t, err)
    assert.NotEmpty(t, encodeData)
    err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1)
    assert.NoError(t, err)
    err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
    assert.NoError(t, err)

    // create first chunk & batch mock prover, that will not send any proof.
    chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), coordinatorURL, message.ProofTypeChunk, version.Version)
    proverChunkTask, errChunkCode, errChunkMsg := chunkProver1.getProverTask(t, message.ProofTypeChunk, "istanbul")
    proverChunkTask, errChunkCode, errChunkMsg := chunkProver1.getProverTask(t, message.ProofTypeChunk)
    assert.NotNil(t, proverChunkTask)
    assert.Equal(t, errChunkCode, types.Success)
    assert.Equal(t, errChunkMsg, "")

    batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), coordinatorURL, message.ProofTypeBatch, version.Version)
    proverBatchTask, errBatchCode, errBatchMsg := batchProver1.getProverTask(t, message.ProofTypeBatch, "istanbul")
    proverBatchTask, errBatchCode, errBatchMsg := batchProver1.getProverTask(t, message.ProofTypeBatch)
    assert.NotNil(t, proverBatchTask)
    assert.Equal(t, errBatchCode, types.Success)
    assert.Equal(t, errBatchMsg, "")
@@ -837,18 +641,18 @@ func testTimeoutProof(t *testing.T) {

    // create second mock prover, that will send valid proof.
    chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk, version.Version)
    proverChunkTask2, chunkTask2ErrCode, chunkTask2ErrMsg := chunkProver2.getProverTask(t, message.ProofTypeChunk, "istanbul")
    proverChunkTask2, chunkTask2ErrCode, chunkTask2ErrMsg := chunkProver2.getProverTask(t, message.ProofTypeChunk)
    assert.NotNil(t, proverChunkTask2)
    assert.Equal(t, chunkTask2ErrCode, types.Success)
    assert.Equal(t, chunkTask2ErrMsg, "")
    chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")
    chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)

    batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
    proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
    proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch)
    assert.NotNil(t, proverBatchTask2)
    assert.Equal(t, batchTask2ErrCode, types.Success)
    assert.Equal(t, batchTask2ErrMsg, "")
    batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")
    batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)

    // verify proof status, it should be verified now, because second prover sent valid proof
    chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
}

// connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) {
    challengeString := r.challenge(t)
    return r.login(t, challengeString, forkName)
    return r.login(t, challengeString, proverTypes)
}

func (r *mockProver) challenge(t *testing.T) string {
@@ -76,43 +76,35 @@ func (r *mockProver) challenge(t *testing.T) string {
    return loginData.Token
}

func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
    var body string
    if forkName != "" {
        authMsg := message.AuthMsg{
            Identity: &message.Identity{
                Challenge:     challengeString,
                ProverName:    r.proverName,
                ProverVersion: r.proverVersion,
                HardForkName:  forkName,
            },
        }
        assert.NoError(t, authMsg.SignWithKey(r.privKey))
        body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
            authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
    } else {
        authMsg := message.LegacyAuthMsg{
            Identity: &message.LegacyIdentity{
                Challenge:     challengeString,
                ProverName:    r.proverName,
                ProverVersion: r.proverVersion,
            },
        }
        assert.NoError(t, authMsg.SignWithKey(r.privKey))
        body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
            authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []types.ProverType) (string, int, string) {
    authMsg := types.LoginParameter{
        Message: types.Message{
            Challenge:     challengeString,
            ProverName:    r.proverName,
            ProverVersion: r.proverVersion,
            ProverTypes:   proverTypes,
            VKs:           []string{"mock_vk"},
        },
        PublicKey: r.publicKey(),
    }
    assert.NoError(t, authMsg.SignWithKey(r.privKey))
    body, err := json.Marshal(authMsg)
    assert.NoError(t, err)

    var result ctypes.Response
    client := resty.New()
    resp, err := client.R().
        SetHeader("Content-Type", "application/json").
        SetHeader("Authorization", fmt.Sprintf("Bearer %s", challengeString)).
        SetBody([]byte(body)).
        SetBody(body).
        SetResult(&result).
        Post("http://" + r.coordinatorURL + "/coordinator/v1/login")
    assert.NoError(t, err)

    if result.ErrCode != 0 {
        return "", result.ErrCode, result.ErrMsg
    }

    type login struct {
        Time  string `json:"time"`
        Token string `json:"token"`
@@ -122,7 +114,7 @@ func (r *mockProver) login(t *testing.T, challengeString string, forkName string
    assert.NoError(t, err)
    assert.Equal(t, http.StatusOK, resp.StatusCode())
    assert.Empty(t, result.ErrMsg)
    return loginData.Token
    return loginData.Token, 0, ""
}

func (r *mockProver) healthCheckSuccess(t *testing.T) bool {
@@ -149,9 +141,12 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
    return true
}

func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) {
    // get task from coordinator
    token := r.connectToCoordinator(t, forkName)
    token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
    if errCode != 0 {
        return nil, errCode, errMsg
    }
    assert.NotEmpty(t, token)

    type response struct {
@@ -176,9 +171,12 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
// Testing expected errors returned by coordinator.
//
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
    // get task from coordinator
    token := r.connectToCoordinator(t, forkName)
    token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
    if errCode != 0 {
        return errCode, errMsg
    }
    assert.NotEmpty(t, token)

    type response struct {
@@ -201,50 +199,56 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType,
    return result.ErrCode, result.ErrMsg
}

func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
    proofMsgStatus := message.StatusOk
    if proofStatus == generatedFailed {
        proofMsgStatus = message.StatusProofError
    }

    proof := &message.ProofMsg{
        ProofDetail: &message.ProofDetail{
            ID:         proverTaskSchema.TaskID,
            Type:       message.ProofType(proverTaskSchema.TaskType),
            Status:     proofMsgStatus,
            ChunkProof: &message.ChunkProof{},
            BatchProof: &message.BatchProof{},
        },
    var proof []byte
    switch proverTaskSchema.TaskType {
    case int(message.ProofTypeChunk):
        encodeData, err := json.Marshal(message.ChunkProof{})
        assert.NoError(t, err)
        assert.NotEmpty(t, encodeData)
        proof = encodeData
    case int(message.ProofTypeBatch):
        encodeData, err := json.Marshal(message.BatchProof{})
        assert.NoError(t, err)
        assert.NotEmpty(t, encodeData)
        proof = encodeData
    }

    if proofStatus == generatedFailed {
        proof.Status = message.StatusProofError
    } else if proofStatus == verifiedFailed {
        proof.ProofDetail.ChunkProof.Proof = []byte(verifier.InvalidTestProof)
        proof.ProofDetail.BatchProof.Proof = []byte(verifier.InvalidTestProof)
    if proofStatus == verifiedFailed {
        switch proverTaskSchema.TaskType {
        case int(message.ProofTypeChunk):
            chunkProof := message.ChunkProof{}
            chunkProof.Proof = []byte(verifier.InvalidTestProof)
            encodeData, err := json.Marshal(&chunkProof)
            assert.NoError(t, err)
            assert.NotEmpty(t, encodeData)
            proof = encodeData
        case int(message.ProofTypeBatch):
            batchProof := message.BatchProof{}
            batchProof.Proof = []byte(verifier.InvalidTestProof)
            encodeData, err := json.Marshal(&batchProof)
            assert.NoError(t, err)
            assert.NotEmpty(t, encodeData)
            proof = encodeData
        }
    }

    assert.NoError(t, proof.Sign(r.privKey))
    submitProof := types.SubmitProofParameter{
        TaskID:   proof.ID,
        TaskType: int(proof.Type),
        Status:   int(proof.Status),
        UUID:     proverTaskSchema.UUID,
        TaskID:   proverTaskSchema.TaskID,
        TaskType: proverTaskSchema.TaskType,
        Status:   int(proofMsgStatus),
        Proof:    string(proof),
    }

    switch proof.Type {
    case message.ProofTypeChunk:
        encodeData, err := json.Marshal(proof.ChunkProof)
        assert.NoError(t, err)
        assert.NotEmpty(t, encodeData)
        submitProof.Proof = string(encodeData)
    case message.ProofTypeBatch:
        encodeData, err := json.Marshal(proof.BatchProof)
        assert.NoError(t, err)
        assert.NotEmpty(t, encodeData)
        submitProof.Proof = string(encodeData)
    }

    token := r.connectToCoordinator(t, forkName)
    token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
    assert.Equal(t, authErrCode, 0)
    assert.Equal(t, errMsg, "")
    assert.NotEmpty(t, token)

    submitProofData, err := json.Marshal(submitProof)

@@ -1,5 +1,5 @@
{
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",
"dsn": "postgres://localhost/scroll?sslmode=disable",
"driver_name": "postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
@@ -6,7 +6,7 @@ require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
)
@@ -33,11 +33,11 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/tools v0.17.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
@@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
@@ -155,20 +155,20 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(20), cur)
assert.Equal(t, int64(22), cur)
}

func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(20), cur)
assert.Equal(t, int64(22), cur)
}

func testRollback(t *testing.T) {
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(20), version)
assert.Equal(t, int64(22), version)

assert.NoError(t, Rollback(pgDB, nil))
database/migrate/migrations/00021_bundle.sql (new file, 54 lines)
@@ -0,0 +1,54 @@
-- +goose Up
-- +goose StatementBegin

CREATE TABLE bundle (
index BIGSERIAL PRIMARY KEY,
hash VARCHAR NOT NULL, -- Not part of DA hash, used for SQL query consistency and ease of use, derived using keccak256(concat(start_batch_hash_bytes, end_batch_hash_bytes)).
start_batch_index BIGINT NOT NULL,
end_batch_index BIGINT NOT NULL,
start_batch_hash VARCHAR NOT NULL,
end_batch_hash VARCHAR NOT NULL,
codec_version SMALLINT NOT NULL,

-- proof
batch_proofs_status SMALLINT NOT NULL DEFAULT 1,
proving_status SMALLINT NOT NULL DEFAULT 1,
proof BYTEA DEFAULT NULL,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL,
proof_time_sec INTEGER DEFAULT NULL,
total_attempts SMALLINT NOT NULL DEFAULT 0,
active_attempts SMALLINT NOT NULL DEFAULT 0,

-- rollup
rollup_status SMALLINT NOT NULL DEFAULT 1,
finalize_tx_hash VARCHAR DEFAULT NULL,
finalized_at TIMESTAMP(0) DEFAULT NULL,

-- metadata
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);

CREATE INDEX idx_bundle_index_rollup_status ON bundle(index, rollup_status) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_hash ON bundle(hash) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_hash_proving_status ON bundle(hash, proving_status) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_index_desc ON bundle(index DESC) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_batch_proofs_status ON bundle(batch_proofs_status) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_start_batch_index ON bundle(start_batch_index) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_end_batch_index ON bundle(end_batch_index) WHERE deleted_at IS NULL;
create index idx_bundle_total_attempts_active_attempts_batch_proofs_status
on bundle (total_attempts, active_attempts, batch_proofs_status)
where deleted_at IS NULL;

COMMENT ON COLUMN bundle.batch_proofs_status IS 'undefined, pending, ready';
COMMENT ON COLUMN bundle.proving_status IS 'undefined, unassigned, assigned, proved (deprecated), verified, failed';
COMMENT ON COLUMN bundle.rollup_status IS 'undefined, pending, committing (not used for bundles), committed (not used for bundles), finalizing, finalized, commit_failed (not used for bundles), finalize_failed';

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS bundle;
-- +goose StatementEnd
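For orientation, the new bundle table maps naturally onto a GORM model on the Go side (gorm is the ORM used elsewhere in this repo). The sketch below is illustrative only — the struct and field names are assumptions, not the repository's actual orm package:

package orm

import (
	"time"

	"gorm.io/gorm"
)

// Bundle is a hypothetical GORM model mirroring the bundle table above.
type Bundle struct {
	Index           uint64 `json:"index" gorm:"column:index;primaryKey"`
	Hash            string `json:"hash" gorm:"column:hash"`
	StartBatchIndex uint64 `json:"start_batch_index" gorm:"column:start_batch_index"`
	EndBatchIndex   uint64 `json:"end_batch_index" gorm:"column:end_batch_index"`
	StartBatchHash  string `json:"start_batch_hash" gorm:"column:start_batch_hash"`
	EndBatchHash    string `json:"end_batch_hash" gorm:"column:end_batch_hash"`
	CodecVersion    int16  `json:"codec_version" gorm:"column:codec_version"`

	// proof
	BatchProofsStatus int16      `json:"batch_proofs_status" gorm:"column:batch_proofs_status;default:1"`
	ProvingStatus     int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof             []byte     `json:"proof" gorm:"column:proof"`
	ProverAssignedAt  *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at"`
	ProvedAt          *time.Time `json:"proved_at" gorm:"column:proved_at"`
	ProofTimeSec      int32      `json:"proof_time_sec" gorm:"column:proof_time_sec"`
	TotalAttempts     int16      `json:"total_attempts" gorm:"column:total_attempts;default:0"`
	ActiveAttempts    int16      `json:"active_attempts" gorm:"column:active_attempts;default:0"`

	// rollup
	RollupStatus   int16      `json:"rollup_status" gorm:"column:rollup_status;default:1"`
	FinalizeTxHash string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash"`
	FinalizedAt    *time.Time `json:"finalized_at" gorm:"column:finalized_at"`

	// metadata
	CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
}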
@@ -0,0 +1,23 @@
-- +goose Up
-- +goose StatementBegin

ALTER TABLE batch
ADD COLUMN bundle_hash VARCHAR DEFAULT '',
ADD COLUMN codec_version SMALLINT DEFAULT 0;

CREATE INDEX idx_batch_bundle_hash ON batch(bundle_hash);
CREATE INDEX idx_batch_index_codec_version ON batch(index, codec_version);

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin

DROP INDEX IF EXISTS idx_batch_bundle_hash;
DROP INDEX IF EXISTS idx_batch_index_codec_version;

ALTER TABLE IF EXISTS batch
DROP COLUMN IF EXISTS bundle_hash,
DROP COLUMN IF EXISTS codec_version;

-- +goose StatementEnd
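Correspondingly, the existing batch model on the Go side would pick up the two new columns; a hypothetical fragment whose field names simply mirror the SQL above (the actual orm package may differ):

package orm

// batchBundleColumns sketches the two fields this migration adds to the
// batch model; names are assumptions.
type batchBundleColumns struct {
	BundleHash   string `json:"bundle_hash" gorm:"column:bundle_hash;default:''"`
	CodecVersion int16  `json:"codec_version" gorm:"column:codec_version;default:0"`
}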
go.work.sum: 824 lines changed (diff suppressed because it is too large)
prover/Cargo.lock (generated): 732 lines changed (diff suppressed because it is too large)
@@ -29,8 +29,8 @@ ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", branch = "v0.10", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
prover_next = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.5", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
prover_curie = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.5", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
prover_darwin = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.0-rc.3", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
@@ -37,14 +37,10 @@ endif

prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
rm -rf ./lib && mkdir ./lib
find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib

tests_binary:
cargo clean && cargo test --release --no-run
ls target/release/deps/prover* | grep -v "\.d" | xargs -I{} ln -sf {} ./prover.test
rm -rf ./lib && mkdir ./lib
find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib

lint:
cargo check --all-features
@@ -2,7 +2,7 @@ use anyhow::{bail, Result};
use serde::{Deserialize, Serialize};
use std::fs::File;

use crate::types::ProofType;
use crate::types::ProverType;

#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
@@ -24,14 +24,13 @@ pub struct L2GethConfig {
pub endpoint: String,
}

#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Deserialize)]
pub struct Config {
pub prover_name: String,
pub keystore_path: String,
pub keystore_password: String,
pub db_path: String,
#[serde(default)]
pub proof_type: ProofType,
pub prover_type: ProverType,
pub low_version_circuit: CircuitConfig,
pub high_version_circuit: CircuitConfig,
pub coordinator: CoordinatorConfig,
@@ -21,6 +21,7 @@ pub struct CoordinatorClient<'a> {
key_signer: Rc<KeySigner>,
rt: Runtime,
listener: Box<dyn Listener>,
vks: Vec<String>,
}

impl<'a> CoordinatorClient<'a> {
@@ -28,6 +29,7 @@ impl<'a> CoordinatorClient<'a> {
config: &'a Config,
key_signer: Rc<KeySigner>,
listener: Box<dyn Listener>,
vks: Vec<String>,
) -> Result<Self> {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
@@ -46,6 +48,7 @@ impl<'a> CoordinatorClient<'a> {
key_signer,
rt,
listener,
vks,
};
client.login()?;
Ok(client)
@@ -68,12 +71,15 @@ impl<'a> CoordinatorClient<'a> {
challenge: token.clone(),
prover_name: self.config.prover_name.clone(),
prover_version: crate::version::get_version(),
prover_types: vec![self.config.prover_type],
vks: self.vks.clone(),
};

let buffer = login_message.rlp();
let buffer = rlp::encode(&login_message);
let signature = self.key_signer.sign_buffer(&buffer)?;
let login_request = LoginRequest {
message: login_message,
public_key: self.key_signer.get_public_key(),
signature,
};
let login_response = self.rt.block_on(api.login(&login_request, &token))?;
@@ -1,6 +1,6 @@
use super::errors::ErrorCode;
use crate::types::{ProofFailureType, ProofStatus};
use rlp::RlpStream;
use crate::types::{ProofFailureType, ProofStatus, ProverType, TaskType};
use rlp::{Encodable, RlpStream};
use serde::{Deserialize, Serialize};

#[derive(Deserialize)]
@@ -15,23 +15,36 @@ pub struct LoginMessage {
pub challenge: String,
pub prover_name: String,
pub prover_version: String,
pub prover_types: Vec<ProverType>,
pub vks: Vec<String>,
}

impl LoginMessage {
pub fn rlp(&self) -> Vec<u8> {
let mut rlp = RlpStream::new();
let num_fields = 3;
rlp.begin_list(num_fields);
rlp.append(&self.prover_name);
rlp.append(&self.prover_version);
rlp.append(&self.challenge);
rlp.out().freeze().into()
impl Encodable for LoginMessage {
fn rlp_append(&self, s: &mut RlpStream) {
let num_fields = 5;
s.begin_list(num_fields);
s.append(&self.challenge);
s.append(&self.prover_version);
s.append(&self.prover_name);
// The ProverType on the Go side is a type alias of uint8;
// a uint8 slice is treated as a string in RLP encoding.
let prover_types = self
.prover_types
.iter()
.map(|prover_type: &ProverType| prover_type.to_u8())
.collect::<Vec<u8>>();
s.append(&prover_types);
s.begin_list(self.vks.len());
for vk in &self.vks {
s.append(vk);
}
}
}

#[derive(Serialize, Deserialize)]
pub struct LoginRequest {
pub message: LoginMessage,
pub public_key: String,
pub signature: String,
}

@@ -45,16 +58,15 @@ pub type ChallengeResponseData = LoginResponseData;

#[derive(Default, Serialize, Deserialize)]
pub struct GetTaskRequest {
pub task_type: crate::types::ProofType,
pub task_types: Vec<TaskType>,
pub prover_height: Option<u64>,
pub vks: Vec<String>,
}

#[derive(Serialize, Deserialize)]
pub struct GetTaskResponseData {
pub uuid: String,
pub task_id: String,
pub task_type: crate::types::ProofType,
pub task_type: TaskType,
pub task_data: String,
pub hard_fork_name: String,
}

@@ -63,12 +75,11 @@ pub struct GetTaskResponseData
pub struct SubmitProofRequest {
pub uuid: String,
pub task_id: String,
pub task_type: crate::types::ProofType,
pub task_type: TaskType,
pub status: ProofStatus,
pub proof: String,
pub failure_type: Option<ProofFailureType>,
pub failure_msg: Option<String>,
pub hard_fork_name: String,
}

#[derive(Serialize, Deserialize)]
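The rlp_append implementation above fixes the signed byte layout: challenge, prover_version, prover_name, the prover types as a single byte string, then the list of vks. A minimal sketch of how the Go coordinator could decode the same payload for signature verification — the struct and helper names here are assumptions; only the field order is taken from the encoder:

package auth

import "github.com/scroll-tech/go-ethereum/rlp"

// loginMessage mirrors the field order of the Rust rlp_append above.
type loginMessage struct {
	Challenge     string
	ProverVersion string
	ProverName    string
	ProverTypes   []byte // one uint8 per prover type; RLP treats []byte as a string
	VKs           []string
}

// decodeLoginMessage recovers the message so the coordinator can verify the
// signature over exactly the bytes the prover signed.
func decodeLoginMessage(buf []byte) (*loginMessage, error) {
	var msg loginMessage
	if err := rlp.DecodeBytes(buf, &msg); err != nil {
		return nil, err
	}
	return &msg, nil
}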
@@ -65,7 +65,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
log::info!(
"prover start successfully. name: {}, type: {:?}, publickey: {}, version: {}",
config.prover_name,
config.proof_type,
config.prover_type,
prover.get_public_key(),
version::get_version(),
);
@@ -8,7 +8,8 @@ use crate::{
coordinator_client::{listener::Listener, types::*, CoordinatorClient},
geth_client::GethClient,
key_signer::KeySigner,
types::{ProofFailureType, ProofStatus, ProofType},
types::{ProofFailureType, ProofStatus, ProverType},
utils::get_task_types,
zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};

@@ -24,16 +25,11 @@ pub struct Prover<'a> {

impl<'a> Prover<'a> {
pub fn new(config: &'a Config, coordinator_listener: Box<dyn Listener>) -> Result<Self> {
let proof_type = config.proof_type;
let prover_type = config.prover_type;
let keystore_path = &config.keystore_path;
let keystore_password = &config.keystore_password;

let key_signer = Rc::new(KeySigner::new(keystore_path, keystore_password)?);
let coordinator_client =
CoordinatorClient::new(config, Rc::clone(&key_signer), coordinator_listener)
.context("failed to create coordinator_client")?;

let geth_client = if config.proof_type == ProofType::Chunk {
let geth_client = if config.prover_type == ProverType::Chunk {
Some(Rc::new(RefCell::new(
GethClient::new(
&config.prover_name,
@@ -45,9 +41,16 @@ impl<'a> Prover<'a> {
None
};

let provider = CircuitsHandlerProvider::new(proof_type, config, geth_client.clone())
let provider = CircuitsHandlerProvider::new(prover_type, config, geth_client.clone())
.context("failed to create circuits handler provider")?;

let vks = provider.init_vks(prover_type, config, geth_client.clone());

let key_signer = Rc::new(KeySigner::new(keystore_path, keystore_password)?);
let coordinator_client =
CoordinatorClient::new(config, Rc::clone(&key_signer), coordinator_listener, vks)
.context("failed to create coordinator_client")?;

let prover = Prover {
config,
key_signer: Rc::clone(&key_signer),
@@ -59,10 +62,6 @@ impl<'a> Prover<'a> {
Ok(prover)
}

pub fn get_proof_type(&self) -> ProofType {
self.config.proof_type
}

pub fn get_public_key(&self) -> String {
self.key_signer.get_public_key()
}
@@ -70,12 +69,12 @@ impl<'a> Prover<'a> {
pub fn fetch_task(&self) -> Result<Task> {
log::info!("[prover] start to fetch_task");
let mut req = GetTaskRequest {
task_type: self.get_proof_type(),
task_types: get_task_types(self.config.prover_type),
prover_height: None,
vks: self.circuits_handler_provider.borrow().get_vks(),
// vks: self.circuits_handler_provider.borrow().get_vks(),
};

if self.get_proof_type() == ProofType::Chunk {
if self.config.prover_type == ProverType::Chunk {
let latest_block_number = self.get_latest_block_number_value()?;
if let Some(v) = latest_block_number {
if v.as_u64() == 0 {
@@ -130,7 +129,6 @@ impl<'a> Prover<'a> {
task_type: proof_detail.proof_type,
status: ProofStatus::Ok,
proof: proof_detail.proof_data,
hard_fork_name: task.hard_fork_name.clone(),
..Default::default()
};

@@ -150,8 +148,7 @@ impl<'a> Prover<'a> {
task_type: task.task_type,
status: ProofStatus::Error,
failure_type: Some(failure_type),
failure_msg: Some(error.to_string()),
hard_fork_name: task.hard_fork_name.clone(),
failure_msg: Some(format!("{:#}", error)),
..Default::default()
};
self.do_submit(&request)
@@ -6,57 +6,107 @@ use crate::coordinator_client::types::GetTaskResponseData;

pub type CommonHash = H256;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofType {
pub enum TaskType {
Undefined,
Chunk,
Batch,
Bundle,
}

impl ProofType {
impl TaskType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProofType::Chunk,
2 => ProofType::Batch,
_ => ProofType::Undefined,
1 => TaskType::Chunk,
2 => TaskType::Batch,
3 => TaskType::Bundle,
_ => TaskType::Undefined,
}
}
}

impl Serialize for ProofType {
impl Serialize for TaskType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofType::Undefined => serializer.serialize_i8(0),
ProofType::Chunk => serializer.serialize_i8(1),
ProofType::Batch => serializer.serialize_i8(2),
TaskType::Undefined => serializer.serialize_u8(0),
TaskType::Chunk => serializer.serialize_u8(1),
TaskType::Batch => serializer.serialize_u8(2),
TaskType::Bundle => serializer.serialize_u8(3),
}
}
}

impl<'de> Deserialize<'de> for ProofType {
impl<'de> Deserialize<'de> for TaskType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofType::from_u8(v))
Ok(TaskType::from_u8(v))
}
}

impl Default for ProofType {
impl Default for TaskType {
fn default() -> Self {
Self::Undefined
}
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProverType {
Chunk,
Batch,
}

impl ProverType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProverType::Chunk,
2 => ProverType::Batch,
_ => {
panic!("invalid prover_type")
}
}
}

pub fn to_u8(self) -> u8 {
match self {
ProverType::Chunk => 1,
ProverType::Batch => 2,
}
}
}

impl Serialize for ProverType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProverType::Chunk => serializer.serialize_u8(1),
ProverType::Batch => serializer.serialize_u8(2),
}
}
}

impl<'de> Deserialize<'de> for ProverType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProverType::from_u8(v))
}
}

#[derive(Serialize, Deserialize, Default)]
pub struct Task {
pub uuid: String,
pub id: String,
#[serde(rename = "type", default)]
pub task_type: ProofType,
pub task_type: TaskType,
pub task_data: String,
#[serde(default)]
pub hard_fork_name: String,
@@ -100,7 +150,7 @@ impl From<Task> for TaskWrapper {
pub struct ProofDetail {
pub id: String,
#[serde(rename = "type", default)]
pub proof_type: ProofType,
pub proof_type: TaskType,
pub proof_data: String,
pub error: String,
}
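For reference, the Go coordinator is expected to mirror these numeric values. A hypothetical sketch of the counterpart types — the test code earlier calls types.MakeProverType, and its mapping is inferred here from get_task_types below, where a batch prover serves both batch and bundle tasks; all names and the exact signature are assumptions:

package types

type TaskType uint8

const (
	TaskTypeUndefined TaskType = iota // 0
	TaskTypeChunk                     // 1
	TaskTypeBatch                     // 2
	TaskTypeBundle                    // 3
)

type ProverType uint8

const (
	ProverTypeChunk ProverType = 1
	ProverTypeBatch ProverType = 2
)

// MakeProverType maps a task type to the prover type that can serve it;
// batch provers are assumed to handle both batch and bundle tasks.
func MakeProverType(t TaskType) ProverType {
	if t == TaskTypeChunk {
		return ProverTypeChunk
	}
	return ProverTypeBatch
}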
@@ -1,6 +1,8 @@
use env_logger::Env;
use std::{fs::OpenOptions, sync::Once};

use crate::types::{ProverType, TaskType};

static LOG_INIT: Once = Once::new();

/// Initialize log
@@ -21,3 +23,10 @@ pub fn log_init(log_file: Option<String>) {
builder.init();
});
}

pub fn get_task_types(prover_type: ProverType) -> Vec<TaskType> {
match prover_type {
ProverType::Chunk => vec![TaskType::Chunk],
ProverType::Batch => vec![TaskType::Batch, TaskType::Bundle],
}
}
@@ -1,14 +1,15 @@
mod bernoulli;
mod curie;
mod darwin;

use super::geth_client::GethClient;
use crate::{
config::{AssetsDirEnvConfig, Config},
types::{ProofType, Task},
types::{ProverType, Task, TaskType},
utils::get_task_types,
};
use anyhow::{bail, Result};
use bernoulli::BaseCircuitsHandler;
use curie::NextCircuitsHandler;
use curie::CurieHandler;
use darwin::DarwinHandler;
use std::{cell::RefCell, collections::HashMap, rc::Rc};

type HardForkName = String;
@@ -20,38 +21,37 @@ pub mod utils {
}

pub trait CircuitsHandler {
fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>>;
fn get_vk(&self, task_type: TaskType) -> Option<Vec<u8>>;

fn get_proof_data(&self, task_type: ProofType, task: &Task) -> Result<String>;
fn get_proof_data(&self, task_type: TaskType, task: &Task) -> Result<String>;
}

type CircuitsHandlerBuilder = fn(
proof_type: ProofType,
prover_type: ProverType,
config: &Config,
geth_client: Option<Rc<RefCell<GethClient>>>,
) -> Result<Box<dyn CircuitsHandler>>;

pub struct CircuitsHandlerProvider<'a> {
proof_type: ProofType,
prover_type: ProverType,
config: &'a Config,
geth_client: Option<Rc<RefCell<GethClient>>>,
circuits_handler_builder_map: HashMap<HardForkName, CircuitsHandlerBuilder>,

current_hard_fork_name: Option<HardForkName>,
current_circuit: Option<Rc<Box<dyn CircuitsHandler>>>,
vks: Vec<String>,
}

impl<'a> CircuitsHandlerProvider<'a> {
pub fn new(
proof_type: ProofType,
prover_type: ProverType,
config: &'a Config,
geth_client: Option<Rc<RefCell<GethClient>>>,
) -> Result<Self> {
let mut m: HashMap<HardForkName, CircuitsHandlerBuilder> = HashMap::new();

fn handler_builder(
proof_type: ProofType,
prover_type: ProverType,
config: &Config,
geth_client: Option<Rc<RefCell<GethClient>>>,
) -> Result<Box<dyn CircuitsHandler>> {
@@ -60,8 +60,8 @@ impl<'a> CircuitsHandlerProvider<'a> {
&config.low_version_circuit.hard_fork_name
);
AssetsDirEnvConfig::enable_first();
BaseCircuitsHandler::new(
proof_type,
CurieHandler::new(
prover_type,
&config.low_version_circuit.params_path,
&config.low_version_circuit.assets_path,
geth_client,
@@ -74,7 +74,7 @@ impl<'a> CircuitsHandlerProvider<'a> {
);

fn next_handler_builder(
proof_type: ProofType,
prover_type: ProverType,
config: &Config,
geth_client: Option<Rc<RefCell<GethClient>>>,
) -> Result<Box<dyn CircuitsHandler>> {
@@ -83,8 +83,8 @@ impl<'a> CircuitsHandlerProvider<'a> {
&config.high_version_circuit.hard_fork_name
);
AssetsDirEnvConfig::enable_second();
NextCircuitsHandler::new(
proof_type,
DarwinHandler::new(
prover_type,
&config.high_version_circuit.params_path,
&config.high_version_circuit.assets_path,
geth_client,
@@ -97,16 +97,13 @@ impl<'a> CircuitsHandlerProvider<'a> {
next_handler_builder,
);

let vks = CircuitsHandlerProvider::init_vks(proof_type, config, &m, geth_client.clone());

let provider = CircuitsHandlerProvider {
proof_type,
prover_type,
config,
geth_client,
circuits_handler_builder_map: m,
current_hard_fork_name: None,
current_circuit: None,
vks,
};

Ok(provider)
@@ -132,7 +129,7 @@ impl<'a> CircuitsHandlerProvider<'a> {
);
if let Some(builder) = self.circuits_handler_builder_map.get(hard_fork_name) {
log::info!("building circuits handler for {hard_fork_name}");
let handler = builder(self.proof_type, self.config, self.geth_client.clone())
let handler = builder(self.prover_type, self.config, self.geth_client.clone())
.expect("failed to build circuits handler");
self.current_hard_fork_name = Some(hard_fork_name.clone());
let rc_handler = Rc::new(handler);
@@ -146,27 +143,33 @@ impl<'a> CircuitsHandlerProvider<'a> {
}
}

fn init_vks(
proof_type: ProofType,
pub fn init_vks(
&self,
prover_type: ProverType,
config: &'a Config,
circuits_handler_builder_map: &HashMap<HardForkName, CircuitsHandlerBuilder>,
geth_client: Option<Rc<RefCell<GethClient>>>,
) -> Vec<String> {
circuits_handler_builder_map
self.circuits_handler_builder_map
.iter()
.map(|(hard_fork_name, build)| {
let handler = build(proof_type, config, geth_client.clone())
.flat_map(|(hard_fork_name, build)| {
let handler = build(prover_type, config, geth_client.clone())
.expect("failed to build circuits handler");
let vk = handler
.get_vk(proof_type)
.map_or("".to_string(), utils::encode_vk);
log::info!("vk for {hard_fork_name} is {vk}");
vk

get_task_types(prover_type)
.into_iter()
.map(|task_type| {
let vk = handler
.get_vk(task_type)
.map_or("".to_string(), utils::encode_vk);
log::info!(
"vk for {hard_fork_name} is {vk}, task_type: {:?}",
task_type
);
vk
})
.filter(|vk| !vk.is_empty())
.collect::<Vec<String>>()
})
.collect::<Vec<String>>()
}

pub fn get_vks(&self) -> Vec<String> {
self.vks.clone()
}
}
@@ -1,17 +1,22 @@
use super::CircuitsHandler;
use crate::{geth_client::GethClient, types::ProofType};
use crate::{
geth_client::GethClient,
types::{ProverType, TaskType},
};
use anyhow::{bail, Context, Ok, Result};
use once_cell::sync::Lazy;
use serde::Deserialize;

use crate::types::{CommonHash, Task};
use std::{cell::RefCell, cmp::Ordering, rc::Rc};
use std::{cell::RefCell, cmp::Ordering, env, rc::Rc};

use prover_next::{
use prover_curie::{
aggregator::Prover as BatchProver, check_chunk_hashes, zkevm::Prover as ChunkProver,
BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof, ChunkProvingTask,
};

use super::bernoulli::OUTPUT_DIR;
// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

#[derive(Deserialize)]
pub struct BatchTaskDetail {
@@ -29,33 +34,32 @@ fn get_block_number(block_trace: &BlockTrace) -> Option<u64> {
}

#[derive(Default)]
pub struct NextCircuitsHandler {
pub struct CurieHandler {
chunk_prover: Option<RefCell<ChunkProver>>,
batch_prover: Option<RefCell<BatchProver>>,

geth_client: Option<Rc<RefCell<GethClient>>>,
}

impl NextCircuitsHandler {
impl CurieHandler {
pub fn new(
proof_type: ProofType,
prover_type: ProverType,
params_dir: &str,
assets_dir: &str,
geth_client: Option<Rc<RefCell<GethClient>>>,
) -> Result<Self> {
match proof_type {
ProofType::Chunk => Ok(Self {
match prover_type {
ProverType::Chunk => Ok(Self {
chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
batch_prover: None,
geth_client,
}),

ProofType::Batch => Ok(Self {
ProverType::Batch => Ok(Self {
batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
chunk_prover: None,
geth_client,
}),
_ => bail!("proof type invalid"),
}
}
@@ -186,25 +190,26 @@ impl NextCircuitsHandler {
}
}

impl CircuitsHandler for NextCircuitsHandler {
fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
impl CircuitsHandler for CurieHandler {
fn get_vk(&self, task_type: TaskType) -> Option<Vec<u8>> {
match task_type {
ProofType::Chunk => self
TaskType::Chunk => self
.chunk_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk()),
ProofType::Batch => self
TaskType::Batch => self
.batch_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk()),
TaskType::Bundle => None,
_ => unreachable!(),
}
}

fn get_proof_data(&self, task_type: ProofType, task: &crate::types::Task) -> Result<String> {
fn get_proof_data(&self, task_type: TaskType, task: &crate::types::Task) -> Result<String> {
match task_type {
ProofType::Chunk => self.gen_chunk_proof(task),
ProofType::Batch => self.gen_batch_proof(task),
TaskType::Chunk => self.gen_chunk_proof(task),
TaskType::Batch => self.gen_batch_proof(task),
_ => unreachable!(),
}
}
@@ -216,7 +221,7 @@ impl CircuitsHandler for NextCircuitsHandler {
mod tests {
use super::*;
use crate::zk_circuits_handler::utils::encode_vk;
use prover_next::utils::chunk_trace_to_witness_block;
use prover_curie::utils::chunk_trace_to_witness_block;
use std::{path::PathBuf, sync::LazyLock};

#[ctor::ctor]
@@ -251,12 +256,11 @@ mod tests {

#[test]
fn test_circuits() -> Result<()> {
let chunk_handler =
NextCircuitsHandler::new(ProofType::Chunk, &PARAMS_PATH, &ASSETS_PATH, None)?;
let chunk_handler = CurieHandler::new(ProverType::Chunk, &PARAMS_PATH, &ASSETS_PATH, None)?;

let chunk_vk = chunk_handler.get_vk(ProofType::Chunk).unwrap();
let chunk_vk = chunk_handler.get_vk(TaskType::Chunk).unwrap();

check_vk(ProofType::Chunk, chunk_vk, "chunk vk must be available");
check_vk(TaskType::Chunk, chunk_vk, "chunk vk must be available");
let chunk_dir_paths = get_chunk_dir_paths()?;
log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
let mut chunk_infos = vec![];
@@ -276,10 +280,9 @@ mod tests {
chunk_proofs.push(chunk_proof);
}

let batch_handler =
NextCircuitsHandler::new(ProofType::Batch, &PARAMS_PATH, &ASSETS_PATH, None)?;
let batch_vk = batch_handler.get_vk(ProofType::Batch).unwrap();
check_vk(ProofType::Batch, batch_vk, "batch vk must be available");
let batch_handler = CurieHandler::new(ProverType::Batch, &PARAMS_PATH, &ASSETS_PATH, None)?;
let batch_vk = batch_handler.get_vk(TaskType::Batch).unwrap();
check_vk(TaskType::Batch, batch_vk, "batch vk must be available");
let chunk_hashes_proofs = chunk_infos.into_iter().zip(chunk_proofs).collect();
log::info!("start to prove batch");
let batch_proof = batch_handler.gen_batch_proof_raw(chunk_hashes_proofs)?;
@@ -289,18 +292,19 @@ mod tests {
Ok(())
}

fn check_vk(proof_type: ProofType, vk: Vec<u8>, info: &str) {
log::info!("check_vk, {:?}", proof_type);
let vk_from_file = read_vk(proof_type).unwrap();
fn check_vk(task_type: TaskType, vk: Vec<u8>, info: &str) {
log::info!("check_vk, {:?}", task_type);
let vk_from_file = read_vk(task_type).unwrap();
assert_eq!(vk_from_file, encode_vk(vk), "{info}")
}

fn read_vk(proof_type: ProofType) -> Result<String> {
log::info!("read_vk, {:?}", proof_type);
let vk_file = match proof_type {
ProofType::Chunk => CHUNK_VK_PATH.clone(),
ProofType::Batch => BATCH_VK_PATH.clone(),
ProofType::Undefined => unreachable!(),
fn read_vk(task_type: TaskType) -> Result<String> {
log::info!("read_vk, {:?}", task_type);
let vk_file = match task_type {
TaskType::Chunk => CHUNK_VK_PATH.clone(),
TaskType::Batch => BATCH_VK_PATH.clone(),
TaskType::Bundle => unreachable!(),
TaskType::Undefined => unreachable!(),
};

let data = std::fs::read(vk_file)?;
@@ -1,27 +1,34 @@
use super::CircuitsHandler;
use crate::{geth_client::GethClient, types::ProofType};
use anyhow::{bail, Ok, Result};
use crate::{
geth_client::GethClient,
types::{ProverType, TaskType},
};
use anyhow::{bail, Context, Ok, Result};
use once_cell::sync::Lazy;
use serde::Deserialize;

use crate::types::{CommonHash, Task};
use prover::{
aggregator::Prover as BatchProver, zkevm::Prover as ChunkProver, BatchProof, BlockTrace,
ChunkHash, ChunkProof,
};
use std::{cell::RefCell, cmp::Ordering, env, rc::Rc};

// Only used for debugging.
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
use prover_darwin::{
aggregator::Prover as BatchProver, check_chunk_hashes, zkevm::Prover as ChunkProver,
BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
ChunkProof, ChunkProvingTask,
};

#[derive(Deserialize)]
// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
pub chunk_infos: Vec<ChunkHash>,
pub chunk_proofs: Vec<ChunkProof>,
pub chunk_infos: Vec<ChunkInfo>,
#[serde(flatten)]
pub batch_proving_task: BatchProvingTask,
}

#[derive(Deserialize)]
type BundleTaskDetail = BundleProvingTask;

#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
pub block_hashes: Vec<CommonHash>,
}
@@ -30,44 +37,44 @@ fn get_block_number(block_trace: &BlockTrace) -> Option<u64> {
block_trace.header.number.map(|n| n.as_u64())
}

pub struct BaseCircuitsHandler {
#[derive(Default)]
pub struct DarwinHandler {
chunk_prover: Option<RefCell<ChunkProver>>,
batch_prover: Option<RefCell<BatchProver>>,

geth_client: Option<Rc<RefCell<GethClient>>>,
}

impl BaseCircuitsHandler {
impl DarwinHandler {
pub fn new(
proof_type: ProofType,
prover_type: ProverType,
params_dir: &str,
assets_dir: &str,
geth_client: Option<Rc<RefCell<GethClient>>>,
) -> Result<Self> {
match proof_type {
ProofType::Chunk => Ok(Self {
match prover_type {
ProverType::Chunk => Ok(Self {
chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
batch_prover: None,
geth_client,
}),

ProofType::Batch => Ok(Self {
ProverType::Batch => Ok(Self {
batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
chunk_prover: None,
geth_client,
}),
_ => bail!("proof type invalid"),
}
}

fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
if let Some(prover) = self.chunk_prover.as_ref() {
let chunk_proof = prover.borrow_mut().gen_chunk_proof(
chunk_trace,
None,
None,
self.get_output_dir(),
)?;
let chunk = ChunkProvingTask::from(chunk_trace);

let chunk_proof =
prover
.borrow_mut()
.gen_chunk_proof(chunk, None, None, self.get_output_dir())?;

return Ok(chunk_proof);
}
@@ -80,22 +87,26 @@ impl BaseCircuitsHandler {
Ok(serde_json::to_string(&chunk_proof)?)
}

fn gen_batch_proof_raw(
&self,
chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
) -> Result<BatchProof> {
fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
.chunk_infos
.clone()
.into_iter()
.zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
.collect();

let chunk_proofs: Vec<ChunkProof> =
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

let is_valid = prover.borrow_mut().check_chunk_proofs(&chunk_proofs);
let is_valid = prover.borrow_mut().check_protocol_of_chunks(&chunk_proofs);

if !is_valid {
bail!("non-match chunk protocol")
}

let batch_proof = prover.borrow_mut().gen_agg_evm_proof(
chunk_hashes_proofs,
check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
let batch_proof = prover.borrow_mut().gen_batch_proof(
batch_task_detail.batch_proving_task,
None,
self.get_output_dir(),
)?;
@@ -107,12 +118,32 @@ impl BaseCircuitsHandler {

fn gen_batch_proof(&self, task: &crate::types::Task) -> Result<String> {
log::info!("[circuit] gen_batch_proof for task {}", task.id);
let chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)> =
self.gen_chunk_hashes_proofs(task)?;
let batch_proof = self.gen_batch_proof_raw(chunk_hashes_proofs)?;

let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?;
let batch_proof = self.gen_batch_proof_raw(batch_task_detail)?;
Ok(serde_json::to_string(&batch_proof)?)
}

fn gen_bundle_proof_raw(&self, bundle_task_detail: BundleTaskDetail) -> Result<BundleProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let bundle_proof = prover.borrow_mut().gen_bundle_proof(
bundle_task_detail,
None,
self.get_output_dir(),
)?;

return Ok(bundle_proof);
}
unreachable!("please check errors in proof_type logic")
}

fn gen_bundle_proof(&self, task: &crate::types::Task) -> Result<String> {
log::info!("[circuit] gen_bundle_proof for task {}", task.id);
let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&task.task_data)?;
let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail)?;
Ok(serde_json::to_string(&bundle_proof)?)
}

fn get_output_dir(&self) -> Option<&str> {
OUTPUT_DIR.as_deref()
}
@@ -122,17 +153,6 @@ impl BaseCircuitsHandler {
self.get_sorted_traces_by_hashes(&chunk_task_detail.block_hashes)
}

fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result<Vec<(ChunkHash, ChunkProof)>> {
let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?;

Ok(batch_task_detail
.chunk_infos
.clone()
.into_iter()
.zip(batch_task_detail.chunk_proofs.clone())
.collect())
}

fn get_sorted_traces_by_hashes(&self, block_hashes: &[CommonHash]) -> Result<Vec<BlockTrace>> {
if block_hashes.is_empty() {
log::error!("[prover] failed to get sorted traces: block_hashes are empty");
@@ -187,25 +207,30 @@ impl BaseCircuitsHandler {
}
}

impl CircuitsHandler for BaseCircuitsHandler {
fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
impl CircuitsHandler for DarwinHandler {
fn get_vk(&self, task_type: TaskType) -> Option<Vec<u8>> {
match task_type {
ProofType::Chunk => self
TaskType::Chunk => self
.chunk_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk()),
ProofType::Batch => self
TaskType::Batch => self
.batch_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk()),
.and_then(|prover| prover.borrow().get_batch_vk()),
TaskType::Bundle => self
.batch_prover
.as_ref()
.and_then(|prover| prover.borrow().get_bundle_vk()),
_ => unreachable!(),
}
}

fn get_proof_data(&self, task_type: ProofType, task: &crate::types::Task) -> Result<String> {
fn get_proof_data(&self, task_type: TaskType, task: &crate::types::Task) -> Result<String> {
match task_type {
ProofType::Chunk => self.gen_chunk_proof(task),
ProofType::Batch => self.gen_batch_proof(task),
TaskType::Chunk => self.gen_chunk_proof(task),
TaskType::Batch => self.gen_batch_proof(task),
TaskType::Bundle => self.gen_bundle_proof(task),
_ => unreachable!(),
}
}
@@ -217,7 +242,7 @@ impl CircuitsHandler for BaseCircuitsHandler {
mod tests {
use super::*;
use crate::zk_circuits_handler::utils::encode_vk;
use prover::utils::chunk_trace_to_witness_block;
use prover_darwin::utils::chunk_trace_to_witness_block;
use std::{path::PathBuf, sync::LazyLock};

#[ctor::ctor]
@@ -228,7 +253,7 @@ mod tests {

static DEFAULT_WORK_DIR: &str = "/assets";
static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
std::env::var("BERNOULLI_TEST_DIR")
std::env::var("CURIE_TEST_DIR")
.unwrap_or(String::from(DEFAULT_WORK_DIR))
.trim_end_matches('/')
.to_string()
@@ -253,11 +278,11 @@ mod tests {
#[test]
fn test_circuits() -> Result<()> {
let chunk_handler =
BaseCircuitsHandler::new(ProofType::Chunk, &PARAMS_PATH, &ASSETS_PATH, None)?;
DarwinHandler::new(ProverType::Chunk, &PARAMS_PATH, &ASSETS_PATH, None)?;

let chunk_vk = chunk_handler.get_vk(ProofType::Chunk).unwrap();
let chunk_vk = chunk_handler.get_vk(TaskType::Chunk).unwrap();

check_vk(ProofType::Chunk, chunk_vk, "chunk vk must be available");
check_vk(TaskType::Chunk, chunk_vk, "chunk vk must be available");
let chunk_dir_paths = get_chunk_dir_paths()?;
log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
let mut chunk_infos = vec![];
@@ -278,30 +303,44 @@ mod tests {
}

let batch_handler =
BaseCircuitsHandler::new(ProofType::Batch, &PARAMS_PATH, &ASSETS_PATH, None)?;
let batch_vk = batch_handler.get_vk(ProofType::Batch).unwrap();
check_vk(ProofType::Batch, batch_vk, "batch vk must be available");
let chunk_hashes_proofs = chunk_infos.into_iter().zip(chunk_proofs).collect();
DarwinHandler::new(ProverType::Batch, &PARAMS_PATH, &ASSETS_PATH, None)?;
let batch_vk = batch_handler.get_vk(TaskType::Batch).unwrap();
check_vk(TaskType::Batch, batch_vk, "batch vk must be available");
let batch_task_detail = make_batch_task_detail(chunk_infos, chunk_proofs);
log::info!("start to prove batch");
let batch_proof = batch_handler.gen_batch_proof_raw(chunk_hashes_proofs)?;
let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail)?;
let proof_data = serde_json::to_string(&batch_proof)?;
dump_proof("batch_proof".to_string(), proof_data)?;

Ok(())
}

fn make_batch_task_detail(_: Vec<ChunkInfo>, _: Vec<ChunkProof>) -> BatchTaskDetail {
todo!();
// BatchTaskDetail {
// chunk_infos,
// batch_proving_task: BatchProvingTask {
// parent_batch_hash: todo!(),
// parent_state_root: todo!(),
// batch_header: todo!(),
// chunk_proofs,
// },
// }
}

fn check_vk(proof_type: TaskType, vk: Vec<u8>, info: &str) {
log::info!("check_vk, {:?}", proof_type);
let vk_from_file = read_vk(proof_type).unwrap();
assert_eq!(vk_from_file, encode_vk(vk), "{info}")
}

fn read_vk(proof_type: ProofType) -> Result<String> {
fn read_vk(proof_type: TaskType) -> Result<String> {
log::info!("read_vk, {:?}", proof_type);
let vk_file = match proof_type {
ProofType::Chunk => CHUNK_VK_PATH.clone(),
ProofType::Batch => BATCH_VK_PATH.clone(),
ProofType::Undefined => unreachable!(),
TaskType::Chunk => CHUNK_VK_PATH.clone(),
TaskType::Batch => BATCH_VK_PATH.clone(),
TaskType::Bundle => todo!(),
TaskType::Undefined => unreachable!(),
};

let data = std::fs::read(vk_file)?;
@@ -375,9 +414,9 @@ mod tests {
Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
}

fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkHash> {
fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
Ok(ChunkHash::from_witness_block(&witness_block, false))
Ok(ChunkInfo::from_witness_block(&witness_block, false))
}

fn dump_proof(id: String, proof_data: String) -> Result<()> {
File diff suppressed because one or more lines are too long
@@ -94,6 +94,11 @@ func action(ctx *cli.Context) error {
log.Crit("failed to create batchProposer", "config file", cfgFile, "error", err)
}

bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, genesis.Config, db, registry)
if err != nil {
log.Crit("failed to create bundleProposer", "config file", cfgFile, "error", err)
}

l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)

// Watcher loop to fetch missing blocks
@@ -110,10 +115,14 @@ func action(ctx *cli.Context) error {

go utils.Loop(subCtx, 10*time.Second, batchProposer.TryProposeBatch)

go utils.Loop(subCtx, 10*time.Second, bundleProposer.TryProposeBundle)

go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)

go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)

go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessPendingBundles)

// Finished starting all rollup relayer functions.
log.Info("Start rollup-relayer successfully", "version", version.Version)
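Each stage above runs on its own ticker via utils.Loop until the shared context is canceled. A minimal sketch of what such a helper presumably looks like (an assumption — the actual scroll utils package may differ):

package utils

import (
	"context"
	"time"
)

// Loop runs f every interval until ctx is canceled.
func Loop(ctx context.Context, interval time.Duration, f func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f()
		}
	}
}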
@@ -55,6 +55,7 @@
},
"enable_test_env_bypass_features": true,
"finalize_batch_without_proof_timeout_sec": 7200,
"finalize_bundle_without_proof_timeout_sec": 7200,
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515",
@@ -76,6 +77,10 @@
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634880
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 20,
"bundle_timeout_sec": 36000
}
},
"db_config": {
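The new bundle_proposer_config block would deserialize into a small config struct; a hypothetical sketch whose field names simply mirror the JSON keys (the actual config package may differ):

package config

// BundleProposerConfig sketches a loader for the bundle_proposer_config
// block above; names are assumptions.
type BundleProposerConfig struct {
	MaxBatchNumPerBundle uint64 `json:"max_batch_num_per_bundle"`
	BundleTimeoutSec     uint64 `json:"bundle_timeout_sec"`
}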
@@ -10,8 +10,8 @@ require (
	github.com/go-resty/resty/v2 v2.7.0
	github.com/holiman/uint256 v1.2.4
	github.com/prometheus/client_golang v1.16.0
	github.com/scroll-tech/da-codec v0.0.0-20240712125636-d7e76c3f54b5
	github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4
	github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923
	github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
	github.com/smartystreets/goconvey v1.8.0
	github.com/stretchr/testify v1.9.0
	github.com/urfave/cli/v2 v2.25.7

@@ -21,7 +21,7 @@ require (
require (
	github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bits-and-blooms/bitset v1.12.0 // indirect
	github.com/bits-and-blooms/bitset v1.13.0 // indirect
	github.com/btcsuite/btcd v0.20.1-beta // indirect
	github.com/bytedance/sonic v1.10.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect

@@ -32,7 +32,7 @@ require (
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/deckarep/golang-set v1.8.0 // indirect
	github.com/edsrzf/mmap-go v1.0.0 // indirect
	github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
	github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
	github.com/fjl/memsize v0.0.2 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.2 // indirect

@@ -55,7 +55,7 @@ require (
	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
	github.com/huin/goupnp v1.3.0 // indirect
	github.com/iden3/go-iden3-crypto v0.0.15 // indirect
	github.com/iden3/go-iden3-crypto v0.0.16 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.5 // indirect

@@ -90,21 +90,21 @@ require (
	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
	github.com/smartystreets/assertions v1.13.1 // indirect
	github.com/status-im/keycard-go v0.2.0 // indirect
	github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 // indirect
	github.com/supranational/blst v0.3.12 // indirect
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
	github.com/tklauser/go-sysconf v0.3.12 // indirect
	github.com/tklauser/numcpus v0.6.1 // indirect
	github.com/tklauser/go-sysconf v0.3.14 // indirect
	github.com/tklauser/numcpus v0.8.0 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
	github.com/ugorji/go/codec v1.2.11 // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	github.com/yusufpapurcu/wmi v1.2.3 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	golang.org/x/arch v0.5.0 // indirect
	golang.org/x/crypto v0.17.0 // indirect
	golang.org/x/net v0.18.0 // indirect
	golang.org/x/sync v0.6.0 // indirect
	golang.org/x/sys v0.17.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/crypto v0.24.0 // indirect
	golang.org/x/net v0.21.0 // indirect
	golang.org/x/sync v0.7.0 // indirect
	golang.org/x/sys v0.21.0 // indirect
	golang.org/x/text v0.16.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	google.golang.org/protobuf v1.31.0 // indirect
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

@@ -11,8 +11,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=

@@ -54,8 +54,8 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=

@@ -135,8 +135,8 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=

@@ -236,10 +236,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.0.0-20240712125636-d7e76c3f54b5 h1:mdgFgYSKbB7JbUPEvqKdXxXlzc3uRwD+dlNA4GsFSoo=
github.com/scroll-tech/da-codec v0.0.0-20240712125636-d7e76c3f54b5/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4 h1:gheWXra3HdZsz6q+w4LrXy8ybHOO6/t6Kb/V64bR5wE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923 h1:QKgfS8G0btzg7nmFjSjllaxGkns3yg7g2/tG1nWExEI=
github.com/scroll-tech/da-codec v0.0.0-20240718144756-1875fd490923/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

@@ -267,14 +267,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=

@@ -286,8 +286,8 @@ github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6S
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=

@@ -295,8 +295,8 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=

@@ -306,16 +306,16 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -336,17 +336,15 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -22,6 +22,8 @@ type L2Config struct {
	ChunkProposerConfig *ChunkProposerConfig `json:"chunk_proposer_config"`
	// The batch_proposer config
	BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
	// The bundle_proposer config
	BundleProposerConfig *BundleProposerConfig `json:"bundle_proposer_config"`
}

// ChunkProposerConfig loads chunk_proposer configuration items.

@@ -44,3 +46,9 @@ type BatchProposerConfig struct {
	GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
	MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}

// BundleProposerConfig loads bundle_proposer configuration items.
type BundleProposerConfig struct {
	MaxBatchNumPerBundle uint64 `json:"max_batch_num_per_bundle"`
	BundleTimeoutSec     uint64 `json:"bundle_timeout_sec"`
}

@@ -66,6 +66,8 @@ type RelayerConfig struct {
	EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
	// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
	FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
	// The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true.
	FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"`
}

// GasOracleConfig The config for updating gas price oracle.
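The `bundle_proposer_config` block shown in the JSON fragment earlier (`"max_batch_num_per_bundle": 20`, `"bundle_timeout_sec": 36000`) binds to `BundleProposerConfig` through the struct tags above. A minimal sketch of decoding just that block — the wrapper shape and file path here are assumptions for illustration, not the repo's loader:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// BundleProposerConfig mirrors the struct added in the diff above.
type BundleProposerConfig struct {
	MaxBatchNumPerBundle uint64 `json:"max_batch_num_per_bundle"`
	BundleTimeoutSec     uint64 `json:"bundle_timeout_sec"`
}

func main() {
	// Hypothetical path; the real file is the config patched above.
	data, err := os.ReadFile("config.json")
	if err != nil {
		panic(err)
	}
	// Assumed wrapper key names for illustration only.
	var cfg struct {
		L2Config struct {
			BundleProposerConfig BundleProposerConfig `json:"bundle_proposer_config"`
		} `json:"l2_config"`
	}
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.L2Config.BundleProposerConfig.MaxBatchNumPerBundle) // 20
}
```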
@@ -2,9 +2,11 @@ package relayer

import (
	"context"
	"errors"
	"fmt"
	"math/big"
	"sort"
	"strings"
	"time"

	"github.com/go-resty/resty/v2"

@@ -13,6 +15,7 @@ import (
	"github.com/scroll-tech/da-codec/encoding/codecv0"
	"github.com/scroll-tech/da-codec/encoding/codecv1"
	"github.com/scroll-tech/da-codec/encoding/codecv2"
	"github.com/scroll-tech/da-codec/encoding/codecv3"
	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"

@@ -43,6 +46,7 @@ type Layer2Relayer struct {
	l2Client *ethclient.Client

	db         *gorm.DB
	bundleOrm  *orm.Bundle
	batchOrm   *orm.Batch
	chunkOrm   *orm.Chunk
	l2BlockOrm *orm.L2Block

@@ -122,6 +126,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
	ctx: ctx,
	db:  db,

	bundleOrm:  orm.NewBundle(db),
	batchOrm:   orm.NewBatch(db),
	l2BlockOrm: orm.NewL2Block(db),
	chunkOrm:   orm.NewChunk(db),
@@ -385,12 +390,18 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
		log.Error("failed to construct commitBatch payload codecv1", "index", dbBatch.Index, "err", err)
		return
	}
} else { // codecv2
} else if !r.chainCfg.IsDarwin(dbChunks[0].StartBlockTime) { // codecv2
	calldata, blob, err = r.constructCommitBatchPayloadCodecV2(dbBatch, dbParentBatch, dbChunks, chunks)
	if err != nil {
		log.Error("failed to construct commitBatch payload codecv2", "index", dbBatch.Index, "err", err)
		return
	}
} else { // codecv3
	calldata, blob, err = r.constructCommitBatchPayloadCodecV3(dbBatch, dbParentBatch, dbChunks, chunks)
	if err != nil {
		log.Error("failed to construct commitBatch payload codecv3", "index", dbBatch.Index, "err", err)
		return
	}
}
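The ladder above selects the commit codec by hardfork: pre-Bernoulli uses V0, pre-Curie uses V1, pre-Darwin uses V2, and Darwin onward uses V3 — note that Darwin switches from a block-number check to a block-time check. A condensed sketch of the same decision, using the names that appear in the diff and assuming this file's existing imports:

```go
// chooseCommitCodec condenses the if/else ladder above into one place.
// Sketch only; the real code builds calldata and blob inside each branch.
func chooseCommitCodec(chainCfg *params.ChainConfig, startBlockNum *big.Int, startBlockTime uint64) encoding.CodecVersion {
	switch {
	case !chainCfg.IsBernoulli(startBlockNum):
		return encoding.CodecV0 // pre-Bernoulli
	case !chainCfg.IsCurie(startBlockNum):
		return encoding.CodecV1 // Bernoulli
	case !chainCfg.IsDarwin(startBlockTime):
		return encoding.CodecV2 // Curie
	default:
		return encoding.CodecV3 // Darwin: batches finalized as bundles
	}
}
```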
// fallbackGasLimit is non-zero only in sending non-blob transactions.

@@ -409,14 +420,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
		"hash", dbBatch.Hash,
		"RollupContractAddress", r.cfg.RollupContractAddress,
		"err", err,
	)
	log.Debug(
		"Failed to send commitBatch tx to layer1",
		"index", dbBatch.Index,
		"hash", dbBatch.Hash,
		"RollupContractAddress", r.cfg.RollupContractAddress,
		"calldata", common.Bytes2Hex(calldata),
		"err", err,
	)
	return
}
@@ -496,8 +500,53 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
	}
}

// ProcessPendingBundles submits proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBundles() {
	r.metrics.rollupL2RelayerProcessPendingBundlesTotal.Inc()

	bundle, err := r.bundleOrm.GetFirstPendingBundle(r.ctx)
	if bundle == nil && err == nil {
		return
	}
	if err != nil {
		log.Error("Failed to fetch first pending L2 bundle", "err", err)
		return
	}

	status := types.ProvingStatus(bundle.ProvingStatus)
	switch status {
	case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
		if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(bundle.CreatedAt) > time.Duration(r.cfg.FinalizeBundleWithoutProofTimeoutSec)*time.Second {
			if err := r.finalizeBundle(bundle, false); err != nil {
				log.Error("Failed to finalize timeout bundle without proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
			}
		}

	case types.ProvingTaskVerified:
		log.Info("Start to roll up zk proof", "hash", bundle.Hash)
		r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedTotal.Inc()
		if err := r.finalizeBundle(bundle, true); err != nil {
			log.Error("Failed to finalize bundle with proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
		}

	case types.ProvingTaskFailed:
		// We were unable to prove this bundle. There are two possibilities:
		// (a) Prover bug. In this case, we should fix and redeploy the prover.
		//     In the meantime, we continue to commit batches to L1 as well as
		//     proposing and proving chunks, batches and bundles.
		// (b) Unprovable bundle, e.g. proof overflow. In this case we need to
		//     stop the ledger, fix the limit, revert all the violating blocks,
		//     chunks, batches, bundles and all subsequent ones, and resume,
		//     i.e. this case requires manual resolution.
		log.Error("bundle proving failed", "index", bundle.Index, "hash", bundle.Hash, "prover assigned at", bundle.ProverAssignedAt, "proved at", bundle.ProvedAt, "proof time sec", bundle.ProofTimeSec)

	default:
		log.Error("encounter unreachable case in ProcessPendingBundles", "proving status", status)
	}
}
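Note the order of the checks at the top of `ProcessPendingBundles`: `GetFirstPendingBundle` is evidently expected to return `(nil, nil)` when no pending bundle exists, so the nil/nil case returns before the error is inspected. A hypothetical ORM accessor consistent with that contract (an assumption-based sketch, not the repo's implementation):

```go
package orm

import (
	"context"
	"errors"

	"gorm.io/gorm"
)

// Bundle is a trimmed-down stand-in for the repo's orm.Bundle model.
type Bundle struct {
	db *gorm.DB `gorm:"-"`

	Index         uint64 `gorm:"column:index"`
	Hash          string `gorm:"column:hash"`
	RollupStatus  int16  `gorm:"column:rollup_status"`
	ProvingStatus int16  `gorm:"column:proving_status"`
}

// GetFirstPendingBundle returns the oldest pending bundle, or (nil, nil)
// when no pending row exists -- the contract ProcessPendingBundles assumes.
func (o *Bundle) GetFirstPendingBundle(ctx context.Context) (*Bundle, error) {
	var bundle Bundle
	err := o.db.WithContext(ctx).
		Where("rollup_status = ?", 1 /* hypothetical "pending" value */).
		Order("index ASC").
		First(&bundle).Error
	if errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, nil // "no pending bundle" is not an error
	}
	if err != nil {
		return nil, err
	}
	return &bundle, nil
}
```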
func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error {
	// Check batch status before send `finalizeBatch` tx.
	// Check batch status before sending `finalizeBatch` tx.
	if r.cfg.ChainMonitor.Enabled {
		var batchStatus bool
		batchStatus, err := r.getBatchStatusByIndex(dbBatch)

@@ -508,8 +557,8 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
		}
		if !batchStatus {
			r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
			log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", dbBatch.Index)
			return err
			log.Error("the batch status is false, stop finalize batch and check the reason", "batch_index", dbBatch.Index)
			return errors.New("the batch status is false")
		}
	}
@@ -543,7 +592,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
	if !r.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0
		calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof)
		if err != nil {
			return fmt.Errorf("failed to construct commitBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
			return fmt.Errorf("failed to construct finalizeBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
		}
	} else if !r.chainCfg.IsCurie(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv1
		chunks := make([]*encoding.Chunk, len(dbChunks))

@@ -557,9 +606,9 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error

		calldata, err = r.constructFinalizeBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
		if err != nil {
			return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
			return fmt.Errorf("failed to construct finalizeBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
		}
	} else { // codecv2
	} else if !r.chainCfg.IsDarwin(dbChunks[0].StartBlockTime) { // codecv2
		chunks := make([]*encoding.Chunk, len(dbChunks))
		for i, c := range dbChunks {
			blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)

@@ -571,8 +620,11 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error

		calldata, err = r.constructFinalizeBatchPayloadCodecV2(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
		if err != nil {
			return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
			return fmt.Errorf("failed to construct finalizeBatch payload codecv2, index: %v, err: %w", dbBatch.Index, err)
		}
	} else { // codecv3
		log.Debug("encoding is codecv3, using finalizeBundle instead", "index", dbBatch.Index)
		return nil
	}

	txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
@@ -584,15 +636,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
		"hash", dbBatch.Hash,
		"RollupContractAddress", r.cfg.RollupContractAddress,
		"err", err,
	)
	log.Debug(
		"finalizeBatch in layer1 failed",
		"with proof", withProof,
		"index", dbBatch.Index,
		"hash", dbBatch.Hash,
		"RollupContractAddress", r.cfg.RollupContractAddress,
		"calldata", common.Bytes2Hex(calldata),
		"err", err,
	)
	return err
}

@@ -626,6 +670,98 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
	return nil
}
func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error {
	// Check batch status before sending `finalizeBundle` tx.
	if r.cfg.ChainMonitor.Enabled {
		for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ {
			tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
			if getErr != nil {
				log.Error("failed to get batch by index", "batch index", batchIndex, "error", getErr)
				return getErr
			}
			batchStatus, getErr := r.getBatchStatusByIndex(tmpBatch)
			if getErr != nil {
				r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
				log.Error("failed to get batch status, please check chain_monitor api server", "batch_index", tmpBatch.Index, "err", getErr)
				return getErr
			}
			if !batchStatus {
				r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
				log.Error("the batch status is false, stop finalize batch and check the reason", "batch_index", tmpBatch.Index)
				return errors.New("the batch status is false")
			}
		}
	}

	dbBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.EndBatchIndex)
	if err != nil {
		log.Error("failed to get batch by index", "batch index", bundle.EndBatchIndex, "error", err)
		return err
	}

	var aggProof *message.BundleProof
	if withProof {
		aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash)
		if err != nil {
			return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err)
		}

		if err = aggProof.SanityCheck(); err != nil {
			return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", bundle.Index, err)
		}
	}

	calldata, err := r.constructFinalizeBundlePayloadCodecV3(dbBatch, aggProof)
	if err != nil {
		return fmt.Errorf("failed to construct finalizeBundle payload codecv3, index: %v, err: %w", dbBatch.Index, err)
	}

	txHash, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
	if err != nil {
		log.Error("finalizeBundle in layer1 failed", "with proof", withProof, "index", bundle.Index,
			"start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex,
			"RollupContractAddress", r.cfg.RollupContractAddress, "err", err, "calldata", common.Bytes2Hex(calldata))
		return err
	}

	log.Info("finalizeBundle in layer1", "with proof", withProof, "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "tx hash", txHash.String())

	// Update rollup status in database.
	if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing); err != nil {
		log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
		return err
	}

	// Update the proving status when finalizing without proof, so that the coordinator can skip this task.
	// This isn't a required step, so it is not put in the same transaction as UpdateFinalizeTxHashAndRollupStatus.
	if !withProof {
		txErr := r.db.Transaction(func(tx *gorm.DB) error {
			if updateErr := r.bundleOrm.UpdateProvingStatus(r.ctx, bundle.Hash, types.ProvingTaskVerified); updateErr != nil {
				return updateErr
			}
			if updateErr := r.batchOrm.UpdateProvingStatusByBundleHash(r.ctx, bundle.Hash, types.ProvingTaskVerified); updateErr != nil {
				return updateErr
			}
			for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ {
				tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
				if getErr != nil {
					return getErr
				}
				if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, tmpBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
					return updateErr
				}
			}
			return nil
		})
		if txErr != nil {
			log.Error("Updating chunk and batch proving status when finalizing without proof failure", "bundleHash", bundle.Hash, "err", txErr)
		}
	}

	r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal.Inc()
	return nil
}

// batchStatusResponse is the response schema.
type batchStatusResponse struct {
	ErrCode int `json:"errcode"`
@@ -690,6 +826,36 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
		log.Warn("UpdateCommitTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
	}
case types.SenderTypeFinalizeBatch:
	if strings.HasPrefix(cfm.ContextID, "finalizeBundle-") {
		bundleHash := strings.TrimPrefix(cfm.ContextID, "finalizeBundle-")
		var status types.RollupStatus
		if cfm.IsSuccessful {
			status = types.RollupFinalized
			r.metrics.rollupL2BundlesFinalizedConfirmedTotal.Inc()
		} else {
			status = types.RollupFinalizeFailed
			r.metrics.rollupL2BundlesFinalizedConfirmedFailedTotal.Inc()
			log.Warn("FinalizeBundleTxType transaction confirmed but failed in layer1", "confirmation", cfm)
		}

		err := r.db.Transaction(func(dbTX *gorm.DB) error {
			if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(r.ctx, bundleHash, cfm.TxHash.String(), status); err != nil {
				log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "confirmation", cfm, "err", err)
				return err
			}

			if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundleHash, cfm.TxHash.String(), status); err != nil {
				log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
				return err
			}
			return nil
		})
		if err != nil {
			log.Warn("failed to update rollup status of bundle and batches", "err", err)
		}
		return
	}

	var status types.RollupStatus
	if cfm.IsSuccessful {
		status = types.RollupFinalized
@@ -835,6 +1001,45 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV2(dbBatch *orm.Batch, d
	return calldata, daBatch.Blob(), nil
}

func (r *Layer2Relayer) constructCommitBatchPayloadCodecV3(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
	batch := &encoding.Batch{
		Index:                      dbBatch.Index,
		TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
		ParentBatchHash:            common.HexToHash(dbParentBatch.Hash),
		Chunks:                     chunks,
	}

	daBatch, createErr := codecv3.NewDABatch(batch)
	if createErr != nil {
		return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
	}

	encodedChunks := make([][]byte, len(dbChunks))
	for i, c := range dbChunks {
		daChunk, createErr := codecv3.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
		if createErr != nil {
			return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
		}
		encodedChunks[i] = daChunk.Encode()
	}

	blobDataProof, err := daBatch.BlobDataProofForPointEvaluation()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get blob data proof for point evaluation: %w", err)
	}

	skippedL1MessageBitmap, _, err := encoding.ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to construct skipped L1 message bitmap: %w", err)
	}

	calldata, packErr := r.l1RollupABI.Pack("commitBatchWithBlobProof", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, skippedL1MessageBitmap, blobDataProof)
	if packErr != nil {
		return nil, nil, fmt.Errorf("failed to pack commitBatchWithBlobProof: %w", packErr)
	}
	return calldata, daBatch.Blob(), nil
}
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, aggProof *message.BatchProof) ([]byte, error) {
	if aggProof != nil { // finalizeBatch with proof.
		calldata, packErr := r.l1RollupABI.Pack(

@@ -963,6 +1168,34 @@ func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV2(dbBatch *orm.Batch,
	return calldata, nil
}

func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV3(dbBatch *orm.Batch, aggProof *message.BundleProof) ([]byte, error) {
	if aggProof != nil { // finalizeBundle with proof.
		calldata, packErr := r.l1RollupABI.Pack(
			"finalizeBundleWithProof",
			dbBatch.BatchHeader,
			common.HexToHash(dbBatch.StateRoot),
			common.HexToHash(dbBatch.WithdrawRoot),
			aggProof.Proof,
		)
		if packErr != nil {
			return nil, fmt.Errorf("failed to pack finalizeBundleWithProof: %w", packErr)
		}
		return calldata, nil
	}

	// finalizeBundle without proof.
	calldata, packErr := r.l1RollupABI.Pack(
		"finalizeBundle",
		dbBatch.BatchHeader,
		common.HexToHash(dbBatch.StateRoot),
		common.HexToHash(dbBatch.WithdrawRoot),
	)
	if packErr != nil {
		return nil, fmt.Errorf("failed to pack finalizeBundle: %w", packErr)
	}
	return calldata, nil
}

// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {
@@ -23,6 +23,11 @@ type l2RelayerMetrics struct {
	rollupL2UpdateGasOracleConfirmedFailedTotal               prometheus.Counter
	rollupL2ChainMonitorLatestFailedCall                      prometheus.Counter
	rollupL2ChainMonitorLatestFailedBatchStatus               prometheus.Counter
	rollupL2RelayerProcessPendingBundlesTotal                 prometheus.Counter
	rollupL2RelayerProcessPendingBundlesFinalizedTotal        prometheus.Counter
	rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal prometheus.Counter
	rollupL2BundlesFinalizedConfirmedTotal                    prometheus.Counter
	rollupL2BundlesFinalizedConfirmedFailedTotal              prometheus.Counter
}

var (

@@ -93,6 +98,26 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
	Name: "rollup_layer2_chain_monitor_latest_failed_batch_status",
	Help: "The total number of failed batch status get from chain_monitor",
}),
rollupL2RelayerProcessPendingBundlesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
	Name: "rollup_layer2_relayer_process_pending_bundles_total",
	Help: "Total number of times the layer2 relayer has processed pending bundles.",
}),
rollupL2RelayerProcessPendingBundlesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
	Name: "rollup_layer2_relayer_process_pending_bundles_finalized_total",
	Help: "Total number of times the layer2 relayer has finalized proven bundle processes.",
}),
rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
	Name: "rollup_layer2_relayer_process_pending_bundles_finalized_success_total",
	Help: "Total number of times the layer2 relayer has successfully finalized proven bundle processes.",
}),
rollupL2BundlesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
	Name: "rollup_layer2_bundles_finalized_confirmed_total",
	Help: "Total number of finalized bundles confirmed on layer2.",
}),
rollupL2BundlesFinalizedConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
	Name: "rollup_layer2_bundles_finalized_confirmed_failed_total",
	Help: "Total number of failed confirmations for finalized bundles on layer2.",
}),
}
})
return l2RelayerMetric
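The new counters follow the same promauto pattern as the existing ones: register each counter once against the supplied `prometheus.Registerer` and call `Inc()` at the corresponding processing site. A minimal standalone illustration of the pattern — the metric name here is invented, not one of the repo's:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// newExampleCounter registers a counter the same way the diff does.
// Name and help text are illustrative only.
func newExampleCounter(reg prometheus.Registerer) prometheus.Counter {
	return promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "example_pending_bundles_processed_total",
		Help: "Total number of times pending bundles were processed.",
	})
}

func main() {
	reg := prometheus.NewRegistry()
	counter := newExampleCounter(reg)
	counter.Inc() // bump once per processing pass, as the relayer does
}
```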
@@ -51,15 +51,21 @@ func testCreateNewRelayer(t *testing.T) {
}

func testL2RelayerProcessPendingBatches(t *testing.T) {
	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2}
	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
	for _, codecVersion := range codecVersions {
		db := setupL2RelayerDB(t)
		defer database.CloseDB(db)

		l2Cfg := cfg.L2Config
		chainConfig := &params.ChainConfig{}
		var chainConfig *params.ChainConfig
		if codecVersion == encoding.CodecV0 {
			chainConfig.BernoulliBlock = big.NewInt(0)
			chainConfig = &params.ChainConfig{}
		} else if codecVersion == encoding.CodecV1 {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
		} else if codecVersion == encoding.CodecV2 {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
		} else {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
		}

		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)

@@ -107,9 +113,13 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
		defer database.CloseDB(db)

		l2Cfg := cfg.L2Config
		chainConfig := &params.ChainConfig{}
		var chainConfig *params.ChainConfig
		if codecVersion == encoding.CodecV0 {
			chainConfig.BernoulliBlock = big.NewInt(0)
			chainConfig = &params.ChainConfig{}
		} else if codecVersion == encoding.CodecV1 {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
		} else {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
		}
		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
		assert.NoError(t, err)
@@ -149,7 +159,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
		assert.Equal(t, types.RollupCommitted, statuses[0])

		proof := &message.BatchProof{
			Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
			Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
			Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		}
		err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
		assert.NoError(t, err)

@@ -163,6 +175,66 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
	}
}

func testL2RelayerProcessPendingBundles(t *testing.T) {
	codecVersions := []encoding.CodecVersion{encoding.CodecV3}
	for _, codecVersion := range codecVersions {
		db := setupL2RelayerDB(t)
		defer database.CloseDB(db)

		l2Cfg := cfg.L2Config
		var chainConfig *params.ChainConfig
		if codecVersion == encoding.CodecV3 {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
		}
		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
		assert.NoError(t, err)

		batch := &encoding.Batch{
			Index:                      1,
			TotalL1MessagePoppedBefore: 0,
			ParentBatchHash:            common.Hash{},
			Chunks:                     []*encoding.Chunk{chunk1, chunk2},
		}

		batchOrm := orm.NewBatch(db)
		dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
		assert.NoError(t, err)

		bundleOrm := orm.NewBundle(db)
		bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion)
		assert.NoError(t, err)

		err = bundleOrm.UpdateRollupStatus(context.Background(), bundle.Hash, types.RollupPending)
		assert.NoError(t, err)

		err = bundleOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
		assert.NoError(t, err)

		relayer.ProcessPendingBundles()

		bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(bundles))
		// no valid proof, rollup status remains the same
		assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))

		proof := &message.BundleProof{
			Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
			Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
			Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		}
		err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600)
		assert.NoError(t, err)

		relayer.ProcessPendingBundles()
		bundles, err = bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(bundles))
		assert.Equal(t, types.RollupFinalizing, types.RollupStatus(bundles[0].RollupStatus))
		relayer.StopSenders()
	}
}
func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2}
	for _, codecVersion := range codecVersions {

@@ -172,9 +244,13 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
		l2Cfg := cfg.L2Config
		l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
		l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
		chainConfig := &params.ChainConfig{}
		var chainConfig *params.ChainConfig
		if codecVersion == encoding.CodecV0 {
			chainConfig.BernoulliBlock = big.NewInt(0)
			chainConfig = &params.ChainConfig{}
		} else if codecVersion == encoding.CodecV1 {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
		} else {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
		}
		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
		assert.NoError(t, err)

@@ -205,29 +281,108 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
		err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
		assert.NoError(t, err)

		// Check the database for the updated status using TryTimes.
		ok := utils.TryTimes(5, func() bool {
		assert.Eventually(t, func() bool {
			relayer.ProcessCommittedBatches()
			time.Sleep(time.Second)

			batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
			if batchErr != nil {
				return false
			}
			chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
			if chunkErr != nil {
				return false
			}

			batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
				types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified

			chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
			if chunkErr != nil {
				return false
			}

			chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
				types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified

			return batchStatus && chunkStatus
		})
		assert.True(t, ok)
		}, 5*time.Second, 100*time.Millisecond, "Batch or Chunk status did not update as expected")
		relayer.StopSenders()
	}
}
func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
	codecVersions := []encoding.CodecVersion{encoding.CodecV3}
	for _, codecVersion := range codecVersions {
		db := setupL2RelayerDB(t)
		defer database.CloseDB(db)

		l2Cfg := cfg.L2Config
		l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
		l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0
		var chainConfig *params.ChainConfig
		if codecVersion == encoding.CodecV3 {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
		}
		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
		assert.NoError(t, err)

		l2BlockOrm := orm.NewL2Block(db)
		err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
		assert.NoError(t, err)
		chunkOrm := orm.NewChunk(db)
		chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
		assert.NoError(t, err)
		chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
		assert.NoError(t, err)

		batch := &encoding.Batch{
			Index:                      1,
			TotalL1MessagePoppedBefore: 0,
			ParentBatchHash:            common.Hash{},
			Chunks:                     []*encoding.Chunk{chunk1, chunk2},
		}

		batchOrm := orm.NewBatch(db)
		dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
		assert.NoError(t, err)

		err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
		assert.NoError(t, err)

		err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
		assert.NoError(t, err)

		bundleOrm := orm.NewBundle(db)
		bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion)
		assert.NoError(t, err)

		err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash, nil)
		assert.NoError(t, err)

		assert.Eventually(t, func() bool {
			relayer.ProcessPendingBundles()

			bundleInDB, bundleErr := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
			if bundleErr != nil {
				return false
			}

			bundleStatus := len(bundleInDB) == 1 && types.RollupStatus(bundleInDB[0].RollupStatus) == types.RollupFinalizing &&
				types.ProvingStatus(bundleInDB[0].ProvingStatus) == types.ProvingTaskVerified

			batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
			if batchErr != nil {
				return false
			}

			batchStatus := len(batchInDB) == 1 && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified

			chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
			if chunkErr != nil {
				return false
			}

			chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
				types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified

			return bundleStatus && batchStatus && chunkStatus
		}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch or Chunk status did not update as expected")
		relayer.StopSenders()
	}
}
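These tests replace the hand-rolled `utils.TryTimes`/`time.Sleep` polling with testify's `assert.Eventually`, which re-evaluates the condition on every tick until it returns true or the overall timeout expires. A self-contained illustration of the idiom:

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestEventuallyIdiom mirrors the polling style used above: the condition
// is retried every 100ms for up to 5s instead of sleeping a fixed second.
func TestEventuallyIdiom(t *testing.T) {
	done := make(chan struct{})
	go func() {
		time.Sleep(200 * time.Millisecond) // simulate async work
		close(done)
	}()
	assert.Eventually(t, func() bool {
		select {
		case <-done:
			return true
		default:
			return false
		}
	}, 5*time.Second, 100*time.Millisecond, "work did not finish in time")
}
```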
@@ -288,7 +443,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
	assert.True(t, ok)
}

func testL2RelayerFinalizeConfirm(t *testing.T) {
func testL2RelayerFinalizeBatchConfirm(t *testing.T) {
	db := setupL2RelayerDB(t)
	defer database.CloseDB(db)

@@ -344,6 +499,75 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
	assert.True(t, ok)
}

func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
	db := setupL2RelayerDB(t)
	defer database.CloseDB(db)

	// Create and set up the Layer2 Relayer.
	l2Cfg := cfg.L2Config
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
	assert.NoError(t, err)
	defer l2Relayer.StopSenders()

	// Simulate message confirmations.
	isSuccessful := []bool{true, false}
	batchOrm := orm.NewBatch(db)
	bundleOrm := orm.NewBundle(db)
	batchHashes := make([]string, len(isSuccessful))
	bundleHashes := make([]string, len(isSuccessful))
	for i := range batchHashes {
		batch := &encoding.Batch{
			Index:                      uint64(i + 1),
			TotalL1MessagePoppedBefore: 0,
			ParentBatchHash:            common.Hash{},
			Chunks:                     []*encoding.Chunk{chunk1, chunk2},
		}

		dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
		assert.NoError(t, err)
		batchHashes[i] = dbBatch.Hash

		bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV3)
		assert.NoError(t, err)
		bundleHashes[i] = bundle.Hash

		err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash)
		assert.NoError(t, err)
	}

	for i, bundleHash := range bundleHashes {
		l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{
			ContextID:    "finalizeBundle-" + bundleHash,
			IsSuccessful: isSuccessful[i],
			TxHash:       common.HexToHash("0x123456789abcdef"),
			SenderType:   types.SenderTypeFinalizeBatch,
		})
	}

	assert.Eventually(t, func() bool {
		expectedStatuses := []types.RollupStatus{
			types.RollupFinalized,
			types.RollupFinalizeFailed,
		}

		for i, bundleHash := range bundleHashes {
			bundleInDB, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundleHash}, nil, 0)
			if err != nil || len(bundleInDB) != 1 || types.RollupStatus(bundleInDB[0].RollupStatus) != expectedStatuses[i] {
				return false
			}

			batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHashes[i]}, nil, 0)
			if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
				return false
			}
		}

		return true
	}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch status did not update as expected")
}

func testL2RelayerGasOracleConfirm(t *testing.T) {
	db := setupL2RelayerDB(t)
	defer database.CloseDB(db)
@@ -124,11 +124,15 @@ func TestFunctions(t *testing.T) {
	t.Run("TestCreateNewRelayer", testCreateNewRelayer)
	t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
	t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
	t.Run("TestL2RelayerProcessPendingBundles", testL2RelayerProcessPendingBundles)
	t.Run("TestL2RelayerFinalizeTimeoutBatches", testL2RelayerFinalizeTimeoutBatches)
	t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles)
	t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
	t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
	t.Run("TestL2RelayerFinalizeBatchConfirm", testL2RelayerFinalizeBatchConfirm)
	t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm)
	t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
	t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)

	// test getBatchStatusByIndex
	t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)
}
@@ -4,7 +4,6 @@ import (
	"context"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/prometheus/client_golang/prometheus"

@@ -36,7 +35,6 @@ type BatchProposer struct {
	batchTimeoutSec               uint64
	gasCostIncreaseMultiplier     float64
	maxUncompressedBatchBytesSize uint64
	forkMap                       map[uint64]bool

	chainCfg *params.ChainConfig

@@ -60,14 +58,13 @@ type BatchProposer struct {

// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
	forkHeights, forkMap, _ := forks.CollectSortedForkHeights(chainCfg)
	log.Debug("new batch proposer",
	log.Info("new batch proposer",
		"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
		"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
		"batchTimeoutSec", cfg.BatchTimeoutSec,
		"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
		"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize,
		"forkHeights", forkHeights)
		"maxBlobSize", maxBlobSize,
		"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)

	p := &BatchProposer{
		ctx: ctx,

@@ -80,7 +77,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
		batchTimeoutSec:               cfg.BatchTimeoutSec,
		gasCostIncreaseMultiplier:     cfg.GasCostIncreaseMultiplier,
		maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
		forkMap:                       forkMap,
		chainCfg:                      chainCfg,

		batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
@@ -225,20 +221,7 @@ func (p *BatchProposer) proposeBatch() error {
|
||||
return err
|
||||
}
|
||||
|
||||
startBlockNum := new(big.Int).SetUint64(firstUnbatchedChunk.StartBlockNumber)
|
||||
|
||||
var codecVersion encoding.CodecVersion
|
||||
var maxChunksThisBatch uint64
|
||||
if !p.chainCfg.IsBernoulli(startBlockNum) {
|
||||
codecVersion = encoding.CodecV0
|
||||
maxChunksThisBatch = 15
|
||||
} else if !p.chainCfg.IsCurie(startBlockNum) {
|
||||
codecVersion = encoding.CodecV1
|
||||
maxChunksThisBatch = 15
|
||||
} else {
|
||||
codecVersion = encoding.CodecV2
|
||||
maxChunksThisBatch = 45
|
||||
}
|
||||
maxChunksThisBatch := forks.GetMaxChunksPerBatch(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime)
|
||||
|
||||
// select at most maxChunkNumPerBatch chunks
|
||||
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, firstUnbatchedChunkIndex, int(maxChunksThisBatch))
|
||||
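This hunk collapses the inline codec-and-limit selection into a single fork-aware helper. A hedged sketch of what forks.GetMaxChunksPerBatch plausibly computes, inferred from the removed branches (15 chunks before Curie) and from the updated tests (45 chunks for Curie/CodecV2 and Darwin/CodecV3); the real helper in scroll-tech/common/forks may differ in detail:

// Sketch under the stated assumptions, not the actual implementation.
func getMaxChunksPerBatch(cfg *params.ChainConfig, blockNum, blockTime uint64) uint64 {
// blockTime gates timestamp-based forks such as Darwin in the real helper;
// it is unused here because Curie and Darwin share the same limit.
if cfg.IsCurie(new(big.Int).SetUint64(blockNum)) {
return 45 // CodecV2 / CodecV3 batches
}
return 15 // CodecV0 / CodecV1 batches
}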
@@ -250,13 +233,14 @@ func (p *BatchProposer) proposeBatch() error {
return nil
}

for i, chunk := range dbChunks {
// if a chunk is starting at a fork boundary, only consider earlier chunks
if i != 0 && p.forkMap[chunk.StartBlockNumber] {
// Ensure all chunks in the same batch use the same hardfork name
// If a different hardfork name is found, truncate the chunks slice at that point
hardforkName := forks.GetHardforkName(p.chainCfg, dbChunks[0].StartBlockNumber, dbChunks[0].StartBlockTime)
for i := 1; i < len(dbChunks); i++ {
currentHardfork := forks.GetHardforkName(p.chainCfg, dbChunks[i].StartBlockNumber, dbChunks[i].StartBlockTime)
if currentHardfork != hardforkName {
dbChunks = dbChunks[:i]
if uint64(len(dbChunks)) < maxChunksThisBatch {
maxChunksThisBatch = uint64(len(dbChunks))
}
maxChunksThisBatch = uint64(len(dbChunks)) // update maxChunksThisBatch to trigger batching, because these chunks are the last chunks before the hardfork
break
}
}
@@ -271,6 +255,8 @@ func (p *BatchProposer) proposeBatch() error {
return err
}

codecVersion := forks.GetCodecVersion(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime)

var batch encoding.Batch
batch.Index = dbParentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)
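The new forks.GetCodecVersion call above centralizes the codec choice that the proposers used to spell out inline. A hedged sketch of the mapping it implies, reconstructed from the removed branches and the Darwin/CodecV3 fixtures in the tests below; the IsDarwin signature is an assumption, mirroring the other timestamp-based fork checks:

// Sketch only; the real helper lives in scroll-tech/common/forks.
func getCodecVersion(cfg *params.ChainConfig, blockNum, blockTime uint64) encoding.CodecVersion {
num := new(big.Int).SetUint64(blockNum)
switch {
case !cfg.IsBernoulli(num):
return encoding.CodecV0
case !cfg.IsCurie(num):
return encoding.CodecV1
case !cfg.IsDarwin(blockTime): // assumed timestamp-based check
return encoding.CodecV2
default:
return encoding.CodecV3
}
}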
@@ -26,7 +26,6 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
batchTimeoutSec uint64
forkBlock *big.Int
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
@@ -75,15 +74,6 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "ForkBlockReached",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
forkBlock: big.NewInt(3),
},
}

for _, tt := range tests {
@@ -126,9 +116,7 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
HomesteadBlock: tt.forkBlock,
}, db, nil)
}, &params.ChainConfig{}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

@@ -144,10 +132,7 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
HomesteadBlock: tt.forkBlock,
CurieBlock: big.NewInt(0),
}, db, nil)
}, &params.ChainConfig{}, db, nil)
bp.TryProposeBatch()

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -178,7 +163,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
batchTimeoutSec uint64
forkBlock *big.Int
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
@@ -227,15 +211,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "ForkBlockReached",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
forkBlock: big.NewInt(3),
},
}

for _, tt := range tests {
@@ -280,7 +255,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
HomesteadBlock: tt.forkBlock,
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
@@ -299,7 +273,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
HomesteadBlock: tt.forkBlock,
}, db, nil)
bp.TryProposeBatch()

@@ -331,7 +304,6 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
batchTimeoutSec uint64
forkBlock *big.Int
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
@@ -366,7 +338,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
},
{
name: "MaxL1CommitGasPerBatchIsFirstChunk",
maxL1CommitGas: 190330,
maxL1CommitGas: 189179,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
@@ -380,15 +352,6 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "ForkBlockReached",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
forkBlock: big.NewInt(3),
},
}
for _, tt := range tests {
@@ -435,7 +398,6 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
HomesteadBlock: tt.forkBlock,
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
@@ -456,7 +418,153 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
HomesteadBlock: tt.forkBlock,
}, db, nil)
bp.TryProposeBatch()

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, tt.expectedBatchesLen+1)
batches = batches[1:]
if tt.expectedBatchesLen > 0 {
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
assert.NoError(t, err)
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
}
})
}
}

func testBatchProposerCodecv3Limits(t *testing.T) {
tests := []struct {
name string
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
batchTimeoutSec uint64
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
{
name: "NoLimitReached",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "Timeout",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 0,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxL1CommitGasPerBatchIs0",
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerBatchIs0",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitGasPerBatchIsFirstChunk",
maxL1CommitGas: 249179,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
}, db, nil)
bp.TryProposeBatch()
@@ -721,9 +829,90 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}

func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil)
bp.TryProposeBatch()

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
batches = batches[1:]
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Len(t, dbChunks, 2)
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}

assert.Equal(t, uint64(209350), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}

func testBatchProposerBlobSizeLimit(t *testing.T) {
compressionTests := []bool{false, true} // false for uncompressed, true for compressed
for _, compressed := range compressionTests {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
for _, codecVersion := range codecVersions {
db := setupDB(t)

// Add genesis batch.
@@ -750,10 +939,14 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
assert.NoError(t, err)

var chainConfig *params.ChainConfig
if compressed {
if codecVersion == encoding.CodecV0 { // will never hit blob size limit
chainConfig = &params.ChainConfig{}
} else if codecVersion == encoding.CodecV1 {
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
} else if codecVersion == encoding.CodecV2 {
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
} else {
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -769,7 +962,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {

blockHeight := int64(0)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for total := int64(0); total < 20; total++ {
for total := int64(0); total < 90; total++ {
for i := int64(0); i < 30; i++ {
blockHeight++
l2BlockOrm := orm.NewL2Block(db)
@@ -783,12 +976,12 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint64,
BatchTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

for i := 0; i < 30; i++ {
for i := 0; i < 2; i++ {
bp.TryProposeBatch()
}

@@ -798,12 +991,18 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {

var expectedNumBatches int
var numChunksMultiplier uint64
if compressed {
expectedNumBatches = 1
numChunksMultiplier = 20
} else {
expectedNumBatches = 20
if codecVersion == encoding.CodecV0 {
expectedNumBatches = 2
numChunksMultiplier = 15
} else if codecVersion == encoding.CodecV1 {
expectedNumBatches = 2
numChunksMultiplier = 1
} else if codecVersion == encoding.CodecV2 {
expectedNumBatches = 2
numChunksMultiplier = 45
} else {
expectedNumBatches = 2
numChunksMultiplier = 45
}
assert.Len(t, batches, expectedNumBatches)
@@ -815,8 +1014,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
}

func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
compressionTests := []bool{false, true} // false for uncompressed, true for compressed
for _, compressed := range compressionTests {
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
for _, codecVersion := range codecVersions {
db := setupDB(t)

// Add genesis batch.
@@ -842,11 +1041,20 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

var expectedChunkNum uint64
var chainConfig *params.ChainConfig
if compressed {
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
} else {
if codecVersion == encoding.CodecV0 {
chainConfig = &params.ChainConfig{}
expectedChunkNum = 15
} else if codecVersion == encoding.CodecV1 {
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
expectedChunkNum = 15
} else if codecVersion == encoding.CodecV2 {
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
expectedChunkNum = 45
} else {
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
expectedChunkNum = 45
}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -871,7 +1079,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint64,
BatchTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)
@@ -882,14 +1090,90 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
assert.Len(t, batches, 2)
dbBatch := batches[1]

var expectedChunkNum uint64
if compressed {
expectedChunkNum = 45
} else {
expectedChunkNum = 15
}
assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex)

database.CloseDB(db)
}
}

func testBatchProposerRespectHardforks(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

chainConfig := &params.ChainConfig{
BernoulliBlock: big.NewInt(1),
CurieBlock: big.NewInt(2),
DarwinTime: func() *uint64 { t := uint64(4); return &t }(),
}

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
for i := int64(1); i <= 60; i++ {
block.Header.Number = big.NewInt(i)
block.Header.Time = uint64(i)
err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}

for i := 0; i < 5; i++ {
cp.TryProposeChunk()
}

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

for i := 0; i < 5; i++ {
bp.TryProposeBatch()
}

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 4)

expectedEndChunkIndices := []uint64{0, 1, 3, 4}
expectedEndBlockNumbers := []uint64{0, 1, 3, 60}
for i, batch := range batches {
assert.Equal(t, expectedEndChunkIndices[i], batch.EndChunkIndex)
chunk, err := chunkOrm.GetChunkByIndex(context.Background(), batch.EndChunkIndex)
assert.NoError(t, err)
assert.Equal(t, expectedEndBlockNumbers[i], chunk.EndBlockNumber)
}
}
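A worked reading of the assertions above: with Bernoulli at block 1, Curie at block 2, Darwin at timestamp 4, and each block's timestamp equal to its number, the proposers must cut strictly at fork boundaries:

block 0 -> chunk 0 -> batch 0 (genesis, CodecV0)
block 1 -> chunk 1 -> batch 1 (Bernoulli, CodecV1)
blocks 2-3 -> chunks 2-3 -> batch 2 (Curie, CodecV2; the Curie block itself stays in a sole chunk)
blocks 4-60 -> chunk 4 -> batch 3 (Darwin, CodecV3)

This yields exactly the asserted end chunk indices {0, 1, 3, 4} and end block numbers {0, 1, 3, 60}.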
182 rollup/internal/controller/watcher/bundle_proposer.go Normal file
@@ -0,0 +1,182 @@
package watcher

import (
"context"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"

"scroll-tech/common/forks"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
)

// BundleProposer proposes bundles based on available unbundled batches.
type BundleProposer struct {
ctx context.Context
db *gorm.DB

chunkOrm *orm.Chunk
batchOrm *orm.Batch
bundleOrm *orm.Bundle

maxBatchNumPerBundle uint64
bundleTimeoutSec uint64

chainCfg *params.ChainConfig

bundleProposerCircleTotal prometheus.Counter
proposeBundleFailureTotal prometheus.Counter
proposeBundleUpdateInfoTotal prometheus.Counter
proposeBundleUpdateInfoFailureTotal prometheus.Counter
bundleBatchesNum prometheus.Gauge
bundleFirstBlockTimeoutReached prometheus.Counter
bundleBatchesProposeNotEnoughTotal prometheus.Counter
}

// NewBundleProposer creates a new BundleProposer instance.
func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProposer {
log.Info("new bundle proposer", "bundleBatchesNum", cfg.MaxBatchNumPerBundle, "bundleTimeoutSec", cfg.BundleTimeoutSec)

p := &BundleProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
maxBatchNumPerBundle: cfg.MaxBatchNumPerBundle,
bundleTimeoutSec: cfg.BundleTimeoutSec,
chainCfg: chainCfg,

bundleProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_bundle_circle_total",
Help: "Total number of propose bundle attempts.",
}),
proposeBundleFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_bundle_failure_total",
Help: "Total number of propose bundle failures.",
}),
proposeBundleUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_bundle_update_info_total",
Help: "Total number of propose bundle update info attempts.",
}),
proposeBundleUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_bundle_update_info_failure_total",
Help: "Total number of propose bundle update info failures.",
}),
bundleBatchesNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_bundle_batches_number",
Help: "The number of batches in the current bundle.",
}),
bundleFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_bundle_first_block_timeout_reached_total",
Help: "Total times the first block in a bundle reached the timeout.",
}),
bundleBatchesProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_bundle_batches_propose_not_enough_total",
Help: "Total number of times there were not enough batches to propose a bundle.",
}),
}

return p
}

// TryProposeBundle tries to propose a new bundle.
func (p *BundleProposer) TryProposeBundle() {
p.bundleProposerCircleTotal.Inc()
if err := p.proposeBundle(); err != nil {
p.proposeBundleFailureTotal.Inc()
log.Error("propose new bundle failed", "err", err)
return
}
}

func (p *BundleProposer) updateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error {
if len(batches) == 0 {
return nil
}

p.proposeBundleUpdateInfoTotal.Inc()
err := p.db.Transaction(func(dbTX *gorm.DB) error {
bundle, err := p.bundleOrm.InsertBundle(p.ctx, batches, codecVersion, dbTX)
if err != nil {
log.Warn("BundleProposer.InsertBundle failed", "err", err)
return err
}
if err := p.batchOrm.UpdateBundleHashInRange(p.ctx, bundle.StartBatchIndex, bundle.EndBatchIndex, bundle.Hash, dbTX); err != nil {
log.Error("failed to update bundle_hash for batches", "bundle hash", bundle.Hash, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
return err
}
return nil
})
if err != nil {
p.proposeBundleUpdateInfoFailureTotal.Inc()
log.Error("update bundle info in orm failed", "err", err)
return err
}
return nil
}

func (p *BundleProposer) proposeBundle() error {
firstUnbundledBatchIndex, err := p.bundleOrm.GetFirstUnbundledBatchIndex(p.ctx)
if err != nil {
return err
}

// select at most maxBatchesThisBundle batches
maxBatchesThisBundle := p.maxBatchNumPerBundle
batches, err := p.batchOrm.GetBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, encoding.CodecV3, int(maxBatchesThisBundle))
if err != nil {
return err
}

if len(batches) == 0 {
return nil
}

// Ensure all batches in the same bundle use the same hardfork name
// If a different hardfork name is found, truncate the batches slice at that point
firstChunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[0].StartChunkIndex)
if err != nil {
return err
}
hardforkName := forks.GetHardforkName(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime)
codecVersion := forks.GetCodecVersion(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime)
for i := 1; i < len(batches); i++ {
chunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[i].StartChunkIndex)
if err != nil {
return err
}
currentHardfork := forks.GetHardforkName(p.chainCfg, chunk.StartBlockNumber, chunk.StartBlockTime)
if currentHardfork != hardforkName {
batches = batches[:i]
maxBatchesThisBundle = uint64(i) // update maxBatchesThisBundle to trigger bundling, because these batches are the last batches before the hardfork
break
}
}

if uint64(len(batches)) == maxBatchesThisBundle {
log.Info("reached maximum number of batches per bundle", "batch count", len(batches), "start batch index", batches[0].Index, "end batch index", batches[len(batches)-1].Index)
p.bundleFirstBlockTimeoutReached.Inc()
p.bundleBatchesNum.Set(float64(len(batches)))
return p.updateDBBundleInfo(batches, codecVersion)
}

currentTimeSec := uint64(time.Now().Unix())
if firstChunk.StartBlockTime+p.bundleTimeoutSec < currentTimeSec {
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "current time", currentTimeSec)
p.bundleFirstBlockTimeoutReached.Inc()
p.bundleBatchesNum.Set(float64(len(batches)))
return p.updateDBBundleInfo(batches, codecVersion)
}

log.Debug("pending batches are not enough and do not contain a timeout batch")
p.bundleBatchesProposeNotEnoughTotal.Inc()
return nil
}
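The file exposes only TryProposeBundle and leaves the scheduling to the caller. A hedged sketch of the kind of ticker loop the rollup service presumably runs around it; the interval and the surrounding wiring are assumptions, not taken from this diff:

// Sketch only: drive the proposer periodically until the context is cancelled.
ticker := time.NewTicker(2 * time.Second) // hypothetical interval
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
bundleProposer.TryProposeBundle()
}
}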
226 rollup/internal/controller/watcher/bundle_proposer_test.go Normal file
@@ -0,0 +1,226 @@
package watcher

import (
"context"
"math"
"math/big"
"testing"

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"

"scroll-tech/common/database"
"scroll-tech/common/types"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)

func testBundleProposerLimits(t *testing.T) {
tests := []struct {
name string
maxBatchNumPerBundle uint64
bundleTimeoutSec uint64
expectedBundlesLen int
expectedBatchesInFirstBundle uint64 // only be checked when expectedBundlesLen > 0
}{
{
name: "NoLimitReached",
maxBatchNumPerBundle: math.MaxUint64,
bundleTimeoutSec: math.MaxUint32,
expectedBundlesLen: 0,
},
{
name: "Timeout",
maxBatchNumPerBundle: math.MaxUint64,
bundleTimeoutSec: 0,
expectedBundlesLen: 1,
expectedBatchesInFirstBundle: 2,
},
{
name: "maxBatchNumPerBundleIs0",
maxBatchNumPerBundle: 0,
bundleTimeoutSec: math.MaxUint32,
expectedBundlesLen: 0,
},
{
name: "maxBatchNumPerBundleIs1",
maxBatchNumPerBundle: 1,
bundleTimeoutSec: math.MaxUint32,
expectedBundlesLen: 1,
expectedBatchesInFirstBundle: 1,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

cp.TryProposeChunk() // chunk1 contains block1
bap.TryProposeBatch() // batch1 contains chunk1
cp.TryProposeChunk() // chunk2 contains block2
bap.TryProposeBatch() // batch2 contains chunk2

bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: tt.maxBatchNumPerBundle,
BundleTimeoutSec: tt.bundleTimeoutSec,
}, chainConfig, db, nil)

bup.TryProposeBundle()

bundleOrm := orm.NewBundle(db)
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, bundles, tt.expectedBundlesLen)
if tt.expectedBundlesLen > 0 {
assert.Equal(t, uint64(1), bundles[0].StartBatchIndex)
assert.Equal(t, tt.expectedBatchesInFirstBundle, bundles[0].EndBatchIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(bundles[0].ProvingStatus))
}
})
}
}
func testBundleProposerRespectHardforks(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

chainConfig := &params.ChainConfig{
BernoulliBlock: big.NewInt(1),
CurieBlock: big.NewInt(2),
DarwinTime: func() *uint64 { t := uint64(4); return &t }(),
}

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
for i := int64(1); i <= 60; i++ {
block.Header.Number = big.NewInt(i)
block.Header.Time = uint64(i)
err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}

for i := 0; i < 5; i++ {
cp.TryProposeChunk()
}

bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

for i := 0; i < 5; i++ {
bap.TryProposeBatch()
}

bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: math.MaxUint64,
BundleTimeoutSec: 0,
}, chainConfig, db, nil)

for i := 0; i < 5; i++ {
bup.TryProposeBundle()
}

bundleOrm := orm.NewBundle(db)
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, bundles, 1)

expectedStartBatchIndices := []uint64{3}
expectedEndChunkIndices := []uint64{3}
for i, bundle := range bundles {
assert.Equal(t, expectedStartBatchIndices[i], bundle.StartBatchIndex)
assert.Equal(t, expectedEndChunkIndices[i], bundle.EndBatchIndex)
}
}
@@ -36,7 +36,6 @@ type ChunkProposer struct {
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64
forkHeights []uint64

chainCfg *params.ChainConfig

@@ -62,16 +61,16 @@ type ChunkProposer struct {

// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
forkHeights, _, _ := forks.CollectSortedForkHeights(chainCfg)
log.Debug("new chunk proposer",
log.Info("new chunk proposer",
"maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk,
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize,
"forkHeights", forkHeights)
"maxBlobSize", maxBlobSize,
"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)

p := &ChunkProposer{
ctx: ctx,
@@ -86,7 +85,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
forkHeights: forkHeights,
chainCfg: chainCfg,

chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
@@ -240,10 +238,6 @@ func (p *ChunkProposer) proposeChunk() error {
}

maxBlocksThisChunk := p.maxBlockNumPerChunk
blocksUntilFork := forks.BlocksUntilFork(unchunkedBlockHeight, p.forkHeights)
if blocksUntilFork != 0 && blocksUntilFork < maxBlocksThisChunk {
maxBlocksThisChunk = blocksUntilFork
}

// select at most maxBlocksThisChunk blocks
blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk))
@@ -255,15 +249,20 @@ func (p *ChunkProposer) proposeChunk() error {
return nil
}

var codecVersion encoding.CodecVersion
if !p.chainCfg.IsBernoulli(blocks[0].Header.Number) {
codecVersion = encoding.CodecV0
} else if !p.chainCfg.IsCurie(blocks[0].Header.Number) {
codecVersion = encoding.CodecV1
} else {
codecVersion = encoding.CodecV2
// Ensure all blocks in the same chunk use the same hardfork name
// If a different hardfork name is found, truncate the blocks slice at that point
hardforkName := forks.GetHardforkName(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)
for i := 1; i < len(blocks); i++ {
currentHardfork := forks.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time)
if currentHardfork != hardforkName {
blocks = blocks[:i]
maxBlocksThisChunk = uint64(i) // update maxBlocksThisChunk to trigger chunking, because these blocks are the last blocks before the hardfork
break
}
}

codecVersion := forks.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)

// Including Curie block in a sole chunk.
if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 {
chunk := encoding.Chunk{Blocks: blocks[:1]}
@@ -334,10 +333,9 @@ func (p *ChunkProposer) proposeChunk() error {
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.chunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
log.Info("reached maximum number of blocks in chunk or first block timeout",
"start block number", chunk.Blocks[0].Header.Number,
"block count", len(chunk.Blocks),
"block number", chunk.Blocks[0].Header.Number,
"block timestamp", metrics.FirstBlockTimestamp,
"start block number", chunk.Blocks[0].Header.Number,
"start block timestamp", metrics.FirstBlockTimestamp,
"current time", currentTimeSec)

p.chunkFirstBlockTimeoutReached.Inc()

@@ -25,7 +25,6 @@ func testChunkProposerCodecv0Limits(t *testing.T) {
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
forkBlock *big.Int
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
@@ -145,18 +144,6 @@ func testChunkProposerCodecv0Limits(t *testing.T) {
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "ForkBlockReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
forkBlock: big.NewInt(2),
},
}
for _, tt := range tests {
@@ -176,9 +163,7 @@ func testChunkProposerCodecv0Limits(t *testing.T) {
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
HomesteadBlock: tt.forkBlock,
}, db, nil)
}, &params.ChainConfig{}, db, nil)
cp.TryProposeChunk()

chunkOrm := orm.NewChunk(db)
@@ -209,7 +194,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
forkBlock *big.Int
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
@@ -329,18 +313,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "ForkBlockReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
forkBlock: big.NewInt(2),
},
}

for _, tt := range tests {
@@ -360,9 +332,7 @@ func testChunkProposerCodecv1Limits(t *testing.T) {
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
cp.TryProposeChunk()

chunkOrm := orm.NewChunk(db)
@@ -393,7 +363,6 @@ func testChunkProposerCodecv2Limits(t *testing.T) {
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
forkBlock *big.Int
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
@@ -513,17 +482,175 @@ func testChunkProposerCodecv2Limits(t *testing.T) {
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxTxNumPerChunk: tt.maxTxNum,
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
cp.TryProposeChunk()

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, tt.expectedChunksLen)

if len(chunks) > 0 {
blockOrm := orm.NewL2Block(db)
chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
assert.NoError(t, err)
assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
firstChunkHash := chunks[0].Hash
for _, chunkHash := range chunkHashes {
assert.Equal(t, firstChunkHash, chunkHash)
}
}
})
}
}
func testChunkProposerCodecv3Limits(t *testing.T) {
tests := []struct {
name string
maxBlockNum uint64
maxTxNum uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
{
name: "ForkBlockReached",
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "Timeout",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 0,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 2,
},
{
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitGasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxBlockNumPerChunkIs1",
maxBlockNum: 1,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxTxNumPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 2,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitGasPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 62500,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxRowConsumptionPerChunkIs1",
maxBlockNum: 10,
maxTxNum: 10000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
forkBlock: big.NewInt(2),
},
}
@@ -545,7 +672,7 @@ func testChunkProposerCodecv2Limits(t *testing.T) {
|
||||
ChunkTimeoutSec: tt.chunkTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
|
||||
}, ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
@@ -568,7 +695,7 @@ func testChunkProposerCodecv2Limits(t *testing.T) {
|
||||
}
|
||||
|
||||
func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2}
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupDB(t)
|
||||
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
@@ -584,8 +711,10 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
chainConfig = ¶ms.ChainConfig{}
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)}
|
||||
} else {
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
|
||||
} else {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
@@ -615,6 +744,8 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
numBlocksMultiplier = 22
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
numBlocksMultiplier = 255
|
||||
} else {
|
||||
numBlocksMultiplier = 255
|
||||
}
|
||||
assert.Len(t, chunks, expectedNumChunks)
|
||||
|
||||
@@ -629,12 +760,15 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testChunkProposerIncludeCurieBlockInOneChunk(t *testing.T) {
|
||||
func testChunkProposerRespectHardforks(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
for i := int64(0); i < 10; i++ {
|
||||
for i := int64(1); i <= 20; i++ {
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
block.Header.Number = big.NewInt(i)
|
||||
block.Header.Time = uint64(i)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
@@ -645,12 +779,16 @@ func testChunkProposerIncludeCurieBlockInOneChunk(t *testing.T) {
|
||||
MaxL1CommitGasPerChunk: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
|
||||
MaxRowConsumptionPerChunk: math.MaxUint64,
|
||||
ChunkTimeoutSec: math.MaxUint64,
|
||||
ChunkTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, ¶ms.ChainConfig{BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2)}, db, nil)
|
||||
}, ¶ms.ChainConfig{
|
||||
BernoulliBlock: big.NewInt(1),
|
||||
CurieBlock: big.NewInt(2),
|
||||
DarwinTime: func() *uint64 { t := uint64(4); return &t }(),
|
||||
}, db, nil)
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := 0; i < 5; i++ {
|
||||
cp.TryProposeChunk()
|
||||
}
|
||||
|
||||
@@ -658,9 +796,9 @@ func testChunkProposerIncludeCurieBlockInOneChunk(t *testing.T) {
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Len(t, chunks, 4)
|
||||
expectedEndBlockNumbers := []uint64{1, 2, 3, 20}
|
||||
for i, chunk := range chunks {
|
||||
assert.Equal(t, uint64(i+1), chunk.EndBlockNumber)
|
||||
assert.Equal(t, expectedEndBlockNumbers[i], chunk.EndBlockNumber)
|
||||
}
|
||||
database.CloseDB(db)
|
||||
}
|
||||
|
||||
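The renamed test encodes the hardfork-boundary rule: with Bernoulli at block 1, Curie at block 2, and Darwin at timestamp 4, blocks 1, 2, and 3 each close their own chunk, and the remaining Darwin blocks 4 through 20 share the fourth — hence end blocks {1, 2, 3, 20}. A minimal sketch of the boundary check this exercises, assuming the scroll go-ethereum fork exposes IsBernoulli/IsCurie/IsDarwin helpers in the usual upstream style (hypothetical helper, not the proposer's actual code):

package main

import (
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/params"
)

// sameHardfork reports whether two block headers belong to the same fork era;
// a pending chunk would be sealed as soon as this returns false.
func sameHardfork(cfg *params.ChainConfig, a, b *types.Header) bool {
	return cfg.IsBernoulli(a.Number) == cfg.IsBernoulli(b.Number) &&
		cfg.IsCurie(a.Number) == cfg.IsCurie(b.Number) &&
		cfg.IsDarwin(a.Time) == cfg.IsDarwin(b.Time)
}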
@@ -103,18 +103,26 @@ func TestFunction(t *testing.T) {
	t.Run("TestChunkProposerCodecv0Limits", testChunkProposerCodecv0Limits)
	t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
	t.Run("TestChunkProposerCodecv2Limits", testChunkProposerCodecv2Limits)
	t.Run("TestChunkProposerCodecv3Limits", testChunkProposerCodecv3Limits)
	t.Run("TestChunkProposerBlobSizeLimit", testChunkProposerBlobSizeLimit)
	t.Run("TestChunkProposerIncludeCurieBlockInOneChunk", testChunkProposerIncludeCurieBlockInOneChunk)
	t.Run("TestChunkProposerRespectHardforks", testChunkProposerRespectHardforks)

	// Run batch proposer test cases.
	t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)
	t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits)
	t.Run("TestBatchProposerCodecv2Limits", testBatchProposerCodecv2Limits)
	t.Run("TestBatchProposerCodecv3Limits", testBatchProposerCodecv3Limits)
	t.Run("TestBatchCommitGasAndCalldataSizeCodecv0Estimation", testBatchCommitGasAndCalldataSizeCodecv0Estimation)
	t.Run("TestBatchCommitGasAndCalldataSizeCodecv1Estimation", testBatchCommitGasAndCalldataSizeCodecv1Estimation)
	t.Run("TestBatchCommitGasAndCalldataSizeCodecv2Estimation", testBatchCommitGasAndCalldataSizeCodecv2Estimation)
	t.Run("TestBatchCommitGasAndCalldataSizeCodecv3Estimation", testBatchCommitGasAndCalldataSizeCodecv3Estimation)
	t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit)
	t.Run("TestBatchProposerMaxChunkNumPerBatchLimit", testBatchProposerMaxChunkNumPerBatchLimit)
	t.Run("TestBatchProposerRespectHardforks", testBatchProposerRespectHardforks)

	// Run bundle proposer test cases.
	t.Run("TestBundleProposerLimits", testBundleProposerLimits)
	t.Run("TestBundleProposerRespectHardforks", testBundleProposerRespectHardforks)
}

func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
@@ -34,6 +34,7 @@ type Batch struct {
	WithdrawRoot    string `json:"withdraw_root" gorm:"column:withdraw_root"`
	ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
	BatchHeader     []byte `json:"batch_header" gorm:"column:batch_header"`
	CodecVersion    int16  `json:"codec_version" gorm:"column:codec_version"`

	// proof
	ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
@@ -58,6 +59,9 @@ type Batch struct {
	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`

	// bundle
	BundleHash string `json:"bundle_hash" gorm:"column:bundle_hash"`

	// metadata
	TotalL1CommitGas          uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
	TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
@@ -157,6 +161,26 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error)
	return latestBatch.EndChunkIndex + 1, nil
}

// GetBatchesGEIndexGECodecVersion retrieves batches that have a batch index greater than or equal to the given index and codec version.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetBatchesGEIndexGECodecVersion(ctx context.Context, index uint64, codecv encoding.CodecVersion, limit int) ([]*Batch, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("index >= ?", index)
	db = db.Where("codec_version >= ?", codecv)
	db = db.Order("index ASC")

	if limit > 0 {
		db = db.Limit(limit)
	}

	var batches []*Batch
	if err := db.Find(&batches).Error; err != nil {
		return nil, fmt.Errorf("Batch.GetBatchesGEIndexGECodecVersion error: %w", err)
	}
	return batches, nil
}
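For reference, the call pattern the unit tests later in this diff use: fetch every batch at or above a given index whose codec version is at least the given one, with limit 0 disabling the cap. A small sketch (the logging line, using go-ethereum's log package, is illustrative only):

batches, err := batchOrm.GetBatchesGEIndexGECodecVersion(ctx, 0, encoding.CodecV3, 0)
if err != nil {
	return err
}
for _, b := range batches {
	// results arrive sorted by index ascending
	log.Info("bundle candidate", "index", b.Index, "hash", b.Hash)
}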
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
	if len(hashes) == 0 {
@@ -264,6 +288,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
		WithdrawRoot:      batch.WithdrawRoot().Hex(),
		ParentBatchHash:   batch.ParentBatchHash.Hex(),
		BatchHeader:       batchMeta.BatchBytes,
		CodecVersion:      int16(codecVersion),
		ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
		ProvingStatus:     int16(types.ProvingTaskUnassigned),
		RollupStatus:      int16(types.RollupPending),
@@ -391,7 +416,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
	db = db.Where("hash", hash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash)
		return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, finalizeTxHash: %v", err, hash, status.String(), finalizeTxHash)
	}
	return nil
}
@@ -417,3 +442,73 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
	}
	return nil
}

// UpdateBundleHashInRange updates the bundle_hash for batches within the specified index range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
func (o *Batch) UpdateBundleHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, bundleHash string, dbTX ...*gorm.DB) error {
	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)

	if err := db.Update("bundle_hash", bundleHash).Error; err != nil {
		return fmt.Errorf("Batch.UpdateBundleHashInRange error: %w, start index: %v, end index: %v, bundle hash: %v", err, startIndex, endIndex, bundleHash)
	}
	return nil
}
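The variadic dbTX parameter lets callers run this update inside an existing transaction. A plausible pairing with Bundle.InsertBundle, introduced later in this diff (a sketch of the pattern, not necessarily the proposer's exact code):

err := db.Transaction(func(tx *gorm.DB) error {
	newBundle, err := bundleOrm.InsertBundle(ctx, batches, encoding.CodecV3, tx)
	if err != nil {
		return err
	}
	// stamp every batch in the bundle's closed index range with its hash
	return batchOrm.UpdateBundleHashInRange(ctx, newBundle.StartBatchIndex, newBundle.EndBatchIndex, newBundle.Hash, tx)
})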
// UpdateProvingStatusByBundleHash updates the proving_status for batches within the specified bundle_hash
func (o *Batch) UpdateProvingStatusByBundleHash(ctx context.Context, bundleHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
	updateFields := make(map[string]interface{})
	updateFields["proving_status"] = int(status)

	switch status {
	case types.ProvingTaskAssigned:
		updateFields["prover_assigned_at"] = time.Now()
	case types.ProvingTaskUnassigned:
		updateFields["prover_assigned_at"] = nil
	case types.ProvingTaskVerified:
		updateFields["proved_at"] = time.Now()
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("bundle_hash = ?", bundleHash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Batch.UpdateProvingStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String())
	}
	return nil
}

// UpdateFinalizeTxHashAndRollupStatusByBundleHash updates the finalize transaction hash and rollup status for batches within the specified bundle_hash
func (o *Batch) UpdateFinalizeTxHashAndRollupStatusByBundleHash(ctx context.Context, bundleHash string, finalizeTxHash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
	updateFields := make(map[string]interface{})
	updateFields["finalize_tx_hash"] = finalizeTxHash
	updateFields["rollup_status"] = int(status)

	switch status {
	case types.RollupFinalized:
		updateFields["finalized_at"] = utils.NowUTC()
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("bundle_hash = ?", bundleHash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String())
	}
	return nil
}

rollup/internal/orm/bundle.go (new file, 285 lines)
@@ -0,0 +1,285 @@
package orm

import (
	"context"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"gorm.io/gorm"

	"scroll-tech/common/types"
	"scroll-tech/common/types/message"
	"scroll-tech/common/utils"
)

// Bundle represents a bundle of batches.
type Bundle struct {
	db *gorm.DB `gorm:"column:-"`

	// bundle
	Index           uint64 `json:"index" gorm:"column:index;primaryKey"`
	Hash            string `json:"hash" gorm:"column:hash"`
	StartBatchIndex uint64 `json:"start_batch_index" gorm:"column:start_batch_index"`
	EndBatchIndex   uint64 `json:"end_batch_index" gorm:"column:end_batch_index"`
	StartBatchHash  string `json:"start_batch_hash" gorm:"column:start_batch_hash"`
	EndBatchHash    string `json:"end_batch_hash" gorm:"column:end_batch_hash"`
	CodecVersion    int16  `json:"codec_version" gorm:"column:codec_version"`

	// proof
	BatchProofsStatus int16      `json:"batch_proofs_status" gorm:"column:batch_proofs_status;default:1"`
	ProvingStatus     int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof             []byte     `json:"proof" gorm:"column:proof;default:NULL"`
	ProverAssignedAt  *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt          *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	ProofTimeSec      int32      `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`

	// rollup
	RollupStatus   int16      `json:"rollup_status" gorm:"column:rollup_status;default:1"`
	FinalizeTxHash string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
	FinalizedAt    *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`

	// metadata
	CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewBundle creates a new Bundle database instance.
func NewBundle(db *gorm.DB) *Bundle {
	return &Bundle{db: db}
}

// TableName returns the table name for the Bundle model.
func (*Bundle) TableName() string {
	return "bundle"
}

// getLatestBundle retrieves the latest bundle from the database.
func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Order("index desc")

	var latestBundle Bundle
	if err := db.First(&latestBundle).Error; err != nil {
		if err == gorm.ErrRecordNotFound {
			return nil, nil
		}
		return nil, fmt.Errorf("getLatestBundle error: %w", err)
	}
	return &latestBundle, nil
}

// GetBundles retrieves selected bundles from the database.
// The returned bundles are sorted in ascending order by their index.
// only used in unit tests.
func (o *Bundle) GetBundles(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Bundle, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})

	for key, value := range fields {
		db = db.Where(key, value)
	}

	for _, orderBy := range orderByList {
		db = db.Order(orderBy)
	}

	if limit > 0 {
		db = db.Limit(limit)
	}

	db = db.Order("index ASC")

	var bundles []*Bundle
	if err := db.Find(&bundles).Error; err != nil {
		return nil, fmt.Errorf("Bundle.GetBundles error: %w, fields: %v, orderByList: %v", err, fields, orderByList)
	}
	return bundles, nil
}

// GetFirstUnbundledBatchIndex retrieves the first unbundled batch index.
func (o *Bundle) GetFirstUnbundledBatchIndex(ctx context.Context) (uint64, error) {
	// Get the latest bundle
	latestBundle, err := o.getLatestBundle(ctx)
	if err != nil {
		return 0, fmt.Errorf("Bundle.GetFirstUnbundledBatchIndex error: %w", err)
	}
	if latestBundle == nil {
		return 0, nil
	}
	return latestBundle.EndBatchIndex + 1, nil
}
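Grounded in the TestBundleOrm case later in this diff: after bundling batches 0 and 1, the next proposal starts at batch index 2; with no bundles at all it starts at 0. A caller-side sketch:

next, err := bundleOrm.GetFirstUnbundledBatchIndex(ctx)
if err != nil {
	return err
}
// next is the first batch index eligible for inclusion in a new bundle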
// GetFirstPendingBundle retrieves the first pending bundle from the database.
func (o *Bundle) GetFirstPendingBundle(ctx context.Context) (*Bundle, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("rollup_status = ?", types.RollupPending)
	db = db.Order("index asc")

	var pendingBundle Bundle
	if err := db.First(&pendingBundle).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, fmt.Errorf("Bundle.GetFirstPendingBundle error: %w", err)
	}
	return &pendingBundle, nil
}

// GetVerifiedProofByHash retrieves the verified aggregate proof for a bundle with the given hash.
func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.BundleProof, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Select("proof")
	db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified)

	var bundle Bundle
	if err := db.Find(&bundle).Error; err != nil {
		return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash)
	}

	var proof message.BundleProof
	if err := json.Unmarshal(bundle.Proof, &proof); err != nil {
		return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash)
	}
	return &proof, nil
}

// InsertBundle inserts a new bundle into the database.
// Assuming input batches are ordered by index.
func (o *Bundle) InsertBundle(ctx context.Context, batches []*Batch, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Bundle, error) {
	if len(batches) == 0 {
		return nil, errors.New("Bundle.InsertBundle error: no batches provided")
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Bundle{})

	newBundle := Bundle{
		StartBatchHash:    batches[0].Hash,
		StartBatchIndex:   batches[0].Index,
		EndBatchHash:      batches[len(batches)-1].Hash,
		EndBatchIndex:     batches[len(batches)-1].Index,
		BatchProofsStatus: int16(types.BatchProofsStatusPending),
		ProvingStatus:     int16(types.ProvingTaskUnassigned),
		RollupStatus:      int16(types.RollupPending),
		CodecVersion:      int16(codecVersion),
	}

	// Not part of DA hash, used for SQL query consistency and ease of use.
	// Derived using keccak256(concat(start_batch_hash_bytes, end_batch_hash_bytes)).
	newBundle.Hash = hex.EncodeToString(crypto.Keccak256(append(common.Hex2Bytes(newBundle.StartBatchHash[2:]), common.Hex2Bytes(newBundle.EndBatchHash[2:])...)))

	if err := db.Create(&newBundle).Error; err != nil {
		return nil, fmt.Errorf("Bundle.InsertBundle Create error: %w, bundle hash: %v", err, newBundle.Hash)
	}

	return &newBundle, nil
}
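The bundle hash is purely a database convenience, and the derivation in InsertBundle can be reproduced standalone. A self-contained sketch mirroring the line above (the zero hashes in main are placeholders):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// bundleHash mirrors the derivation above: keccak256 over the concatenated raw
// bytes of the start and end batch hashes, which are stored 0x-prefixed.
func bundleHash(startBatchHash, endBatchHash string) string {
	preimage := append(common.Hex2Bytes(startBatchHash[2:]), common.Hex2Bytes(endBatchHash[2:])...)
	return hex.EncodeToString(crypto.Keccak256(preimage))
}

func main() {
	zero := "0x" + hex.EncodeToString(make([]byte, 32))
	fmt.Println(bundleHash(zero, zero)) // 64 hex chars, no 0x prefix, matching the column format
}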
// UpdateFinalizeTxHashAndRollupStatus updates the finalize transaction hash and rollup status for a bundle.
func (o *Bundle) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
	updateFields := make(map[string]interface{})
	updateFields["finalize_tx_hash"] = finalizeTxHash
	updateFields["rollup_status"] = int(status)
	if status == types.RollupFinalized {
		updateFields["finalized_at"] = time.Now()
	}

	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash", hash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Bundle.UpdateFinalizeTxHashAndRollupStatus error: %w, bundle hash: %v, status: %v, finalizeTxHash: %v", err, hash, status.String(), finalizeTxHash)
	}
	return nil
}

// UpdateProvingStatus updates the proving status of a bundle.
func (o *Bundle) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
	updateFields := make(map[string]interface{})
	updateFields["proving_status"] = int(status)

	switch status {
	case types.ProvingTaskAssigned:
		updateFields["prover_assigned_at"] = time.Now()
	case types.ProvingTaskUnassigned:
		updateFields["prover_assigned_at"] = nil
	case types.ProvingTaskVerified:
		updateFields["proved_at"] = time.Now()
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash", hash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Bundle.UpdateProvingStatus error: %w, bundle hash: %v, status: %v", err, hash, status.String())
	}
	return nil
}

// UpdateRollupStatus updates the rollup status for a bundle.
// only used in unit tests.
func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error {
	updateFields := make(map[string]interface{})
	updateFields["rollup_status"] = int(status)
	if status == types.RollupFinalized {
		updateFields["finalized_at"] = time.Now()
	}

	db := o.db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash", hash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Bundle.UpdateRollupStatus error: %w, bundle hash: %v, status: %v", err, hash, status.String())
	}
	return nil
}

// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash.
// only used in unit tests.
func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.BundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}

	proofBytes, err := json.Marshal(proof)
	if err != nil {
		return err
	}

	updateFields := make(map[string]interface{})
	updateFields["proof"] = proofBytes
	updateFields["proving_status"] = provingStatus
	updateFields["proof_time_sec"] = proofTimeSec
	updateFields["proved_at"] = utils.NowUTC()

	db = db.WithContext(ctx)
	db = db.Model(&Bundle{})
	db = db.Where("hash", hash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Bundle.UpdateProofAndProvingStatusByHash error: %w, bundle hash: %v", err, hash)
	}
	return nil
}
@@ -10,6 +10,8 @@ import (
	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/da-codec/encoding/codecv0"
	"github.com/scroll-tech/da-codec/encoding/codecv1"
	"github.com/scroll-tech/da-codec/encoding/codecv2"
	"github.com/scroll-tech/da-codec/encoding/codecv3"
	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/stretchr/testify/assert"
@@ -17,6 +19,7 @@ import (

	"scroll-tech/common/testcontainers"
	"scroll-tech/common/types"
	"scroll-tech/common/types/message"
	"scroll-tech/database/migrate"

	"scroll-tech/rollup/internal/utils"
@@ -29,6 +32,7 @@ var (
	l2BlockOrm            *L2Block
	chunkOrm              *Chunk
	batchOrm              *Batch
	bundleOrm             *Bundle
	pendingTransactionOrm *PendingTransaction

	block1 *encoding.Block
@@ -59,6 +63,7 @@ func setupEnv(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	bundleOrm = NewBundle(db)
	batchOrm = NewBatch(db)
	chunkOrm = NewChunk(db)
	l2BlockOrm = NewL2Block(db)
@@ -165,7 +170,7 @@ func TestL2BlockOrm(t *testing.T) {
}

func TestChunkOrm(t *testing.T) {
	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
	chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
	for _, codecVersion := range codecVersions {
@@ -184,7 +189,7 @@ func TestChunkOrm(t *testing.T) {
			assert.NoError(t, createErr)
			chunkHash2, err = daChunk2.Hash()
			assert.NoError(t, err)
		} else {
		} else if codecVersion == encoding.CodecV1 {
			daChunk1, createErr := codecv1.NewDAChunk(chunk1, 0)
			assert.NoError(t, createErr)
			chunkHash1, err = daChunk1.Hash()
@@ -194,6 +199,26 @@ func TestChunkOrm(t *testing.T) {
			assert.NoError(t, createErr)
			chunkHash2, err = daChunk2.Hash()
			assert.NoError(t, err)
		} else if codecVersion == encoding.CodecV2 {
			daChunk1, createErr := codecv2.NewDAChunk(chunk1, 0)
			assert.NoError(t, createErr)
			chunkHash1, err = daChunk1.Hash()
			assert.NoError(t, err)

			daChunk2, createErr := codecv2.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
			assert.NoError(t, createErr)
			chunkHash2, err = daChunk2.Hash()
			assert.NoError(t, err)
		} else {
			daChunk1, createErr := codecv3.NewDAChunk(chunk1, 0)
			assert.NoError(t, createErr)
			chunkHash1, err = daChunk1.Hash()
			assert.NoError(t, err)

			daChunk2, createErr := codecv3.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
			assert.NoError(t, createErr)
			chunkHash2, err = daChunk2.Hash()
			assert.NoError(t, err)
		}

		dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, utils.ChunkMetrics{})
@@ -238,7 +263,7 @@
}

func TestBatchOrm(t *testing.T) {
	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
	chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
	for _, codecVersion := range codecVersions {
@@ -247,10 +272,8 @@ func TestBatchOrm(t *testing.T) {
		assert.NoError(t, migrate.ResetDB(sqlDB))

		batch := &encoding.Batch{
			Index:                      0,
			TotalL1MessagePoppedBefore: 0,
			ParentBatchHash:            common.Hash{},
			Chunks:                     []*encoding.Chunk{chunk1},
			Index:  0,
			Chunks: []*encoding.Chunk{chunk1},
		}
		batch1, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, utils.BatchMetrics{})
		assert.NoError(t, err)
@@ -264,18 +287,24 @@
			daBatch1, createErr := codecv0.NewDABatchFromBytes(batch1.BatchHeader)
			assert.NoError(t, createErr)
			batchHash1 = daBatch1.Hash().Hex()
		} else {
		} else if codecVersion == encoding.CodecV1 {
			daBatch1, createErr := codecv1.NewDABatchFromBytes(batch1.BatchHeader)
			assert.NoError(t, createErr)
			batchHash1 = daBatch1.Hash().Hex()
		} else if codecVersion == encoding.CodecV2 {
			daBatch1, createErr := codecv2.NewDABatchFromBytes(batch1.BatchHeader)
			assert.NoError(t, createErr)
			batchHash1 = daBatch1.Hash().Hex()
		} else {
			daBatch1, createErr := codecv3.NewDABatchFromBytes(batch1.BatchHeader)
			assert.NoError(t, createErr)
			batchHash1 = daBatch1.Hash().Hex()
		}
		assert.Equal(t, hash1, batchHash1)

		batch = &encoding.Batch{
			Index:                      1,
			TotalL1MessagePoppedBefore: 0,
			ParentBatchHash:            common.Hash{},
			Chunks:                     []*encoding.Chunk{chunk2},
			Index:  1,
			Chunks: []*encoding.Chunk{chunk2},
		}
		batch2, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, utils.BatchMetrics{})
		assert.NoError(t, err)
@@ -351,9 +380,195 @@ func TestBatchOrm(t *testing.T) {
		assert.NotNil(t, updatedBatch)
		assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
		assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))

		batches, err := batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0)
		assert.NoError(t, err)
		assert.Equal(t, 2, len(batches))
		assert.Equal(t, batchHash1, batches[0].Hash)
		assert.Equal(t, batchHash2, batches[1].Hash)

		batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 1)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(batches))
		assert.Equal(t, batchHash1, batches[0].Hash)

		batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 1, codecVersion, 0)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(batches))
		assert.Equal(t, batchHash2, batches[0].Hash)

		batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion+1, 0)
		assert.NoError(t, err)
		assert.Equal(t, 0, len(batches))

		err = batchOrm.UpdateBundleHashInRange(context.Background(), 0, 0, "test hash")
		assert.NoError(t, err)

		err = batchOrm.UpdateProvingStatusByBundleHash(context.Background(), "test hash", types.ProvingTaskFailed)
		assert.NoError(t, err)

		err = batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(context.Background(), "test hash", "tx hash", types.RollupCommitFailed)
		assert.NoError(t, err)

		batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0)
		assert.NoError(t, err)
		assert.Equal(t, 2, len(batches))
		assert.Equal(t, batchHash1, batches[0].Hash)
		assert.Equal(t, batchHash2, batches[1].Hash)
		assert.Equal(t, types.ProvingTaskFailed, types.ProvingStatus(batches[0].ProvingStatus))
		assert.Equal(t, types.RollupCommitFailed, types.RollupStatus(batches[0].RollupStatus))
		assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(batches[1].ProvingStatus))
		assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(batches[1].RollupStatus))
	}
}

func TestBundleOrm(t *testing.T) {
	sqlDB, err := db.DB()
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
	batch1 := &encoding.Batch{
		Index:  0,
		Chunks: []*encoding.Chunk{chunk1},
	}
	dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV3, utils.BatchMetrics{})
	assert.NoError(t, err)

	chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
	batch2 := &encoding.Batch{
		Index:  1,
		Chunks: []*encoding.Chunk{chunk2},
	}
	dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV3, utils.BatchMetrics{})
	assert.NoError(t, err)

	var bundle1 *Bundle
	var bundle2 *Bundle

	t.Run("InsertBundle", func(t *testing.T) {
		bundle1, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch1}, encoding.CodecV3)
		assert.NoError(t, err)
		assert.NotNil(t, bundle1)
		assert.Equal(t, uint64(0), bundle1.StartBatchIndex)
		assert.Equal(t, uint64(0), bundle1.EndBatchIndex)
		assert.Equal(t, dbBatch1.Hash, bundle1.StartBatchHash)
		assert.Equal(t, dbBatch1.Hash, bundle1.EndBatchHash)
		assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle1.CodecVersion))

		bundle2, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch2}, encoding.CodecV3)
		assert.NoError(t, err)
		assert.NotNil(t, bundle2)
		assert.Equal(t, uint64(1), bundle2.StartBatchIndex)
		assert.Equal(t, uint64(1), bundle2.EndBatchIndex)
		assert.Equal(t, dbBatch2.Hash, bundle2.StartBatchHash)
		assert.Equal(t, dbBatch2.Hash, bundle2.EndBatchHash)
		assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle2.CodecVersion))
	})

	t.Run("GetFirstUnbundledBatchIndex", func(t *testing.T) {
		index, err := bundleOrm.GetFirstUnbundledBatchIndex(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, uint64(2), index)
	})

	t.Run("GetFirstPendingBundle", func(t *testing.T) {
		bundle, err := bundleOrm.GetFirstPendingBundle(context.Background())
		assert.NoError(t, err)
		assert.NotNil(t, bundle)
		assert.Equal(t, int16(types.RollupPending), bundle.RollupStatus)
	})

	t.Run("UpdateFinalizeTxHashAndRollupStatus", func(t *testing.T) {
		err := bundleOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), bundle1.Hash, "0xabcd", types.RollupFinalized)
		assert.NoError(t, err)

		pendingBundle, err := bundleOrm.GetFirstPendingBundle(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, uint64(2), pendingBundle.Index)

		var finalizedBundle Bundle
		err = db.Where("hash = ?", bundle1.Hash).First(&finalizedBundle).Error
		assert.NoError(t, err)
		assert.Equal(t, "0xabcd", finalizedBundle.FinalizeTxHash)
		assert.Equal(t, int16(types.RollupFinalized), finalizedBundle.RollupStatus)
		assert.NotNil(t, finalizedBundle.FinalizedAt)
	})

	t.Run("UpdateProvingStatus", func(t *testing.T) {
		err := bundleOrm.UpdateProvingStatus(context.Background(), bundle1.Hash, types.ProvingTaskAssigned)
		assert.NoError(t, err)

		var bundle Bundle
		err = db.Where("hash = ?", bundle1.Hash).First(&bundle).Error
		assert.NoError(t, err)
		assert.Equal(t, int16(types.ProvingTaskAssigned), bundle.ProvingStatus)
		assert.NotNil(t, bundle.ProverAssignedAt)

		err = bundleOrm.UpdateProvingStatus(context.Background(), bundle1.Hash, types.ProvingTaskVerified)
		assert.NoError(t, err)

		err = db.Where("hash = ?", bundle1.Hash).First(&bundle).Error
		assert.NoError(t, err)
		assert.Equal(t, int16(types.ProvingTaskVerified), bundle.ProvingStatus)
		assert.NotNil(t, bundle.ProvedAt)
	})

	t.Run("GetVerifiedProofByHash", func(t *testing.T) {
		proof := &message.BundleProof{
			Proof: []byte("test proof"),
		}
		proofBytes, err := json.Marshal(proof)
		assert.NoError(t, err)

		err = db.Model(&Bundle{}).Where("hash = ?", bundle1.Hash).Update("proof", proofBytes).Error
		assert.NoError(t, err)

		retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash)
		assert.NoError(t, err)
		assert.Equal(t, proof.Proof, retrievedProof.Proof)
	})

	t.Run("GetBundles", func(t *testing.T) {
		bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0)
		assert.NoError(t, err)
		assert.Equal(t, 2, len(bundles))
		assert.Equal(t, bundle1.Hash, bundles[0].Hash)
		assert.Equal(t, bundle2.Hash, bundles[1].Hash)
	})

	t.Run("UpdateProofAndProvingStatusByHash", func(t *testing.T) {
		proof := &message.BundleProof{
			Proof: []byte("new test proof"),
		}
		err := bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle2.Hash, proof, types.ProvingTaskVerified, 600)
		assert.NoError(t, err)

		var bundle Bundle
		err = db.Where("hash = ?", bundle2.Hash).First(&bundle).Error
		assert.NoError(t, err)
		assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(bundle.ProvingStatus))
		assert.Equal(t, int32(600), bundle.ProofTimeSec)
		assert.NotNil(t, bundle.ProvedAt)

		var retrievedProof message.BundleProof
		err = json.Unmarshal(bundle.Proof, &retrievedProof)
		assert.NoError(t, err)
		assert.Equal(t, proof.Proof, retrievedProof.Proof)
	})

	t.Run("UpdateRollupStatus", func(t *testing.T) {
		err := bundleOrm.UpdateRollupStatus(context.Background(), bundle2.Hash, types.RollupFinalized)
		assert.NoError(t, err)

		var bundle Bundle
		err = db.Where("hash = ?", bundle2.Hash).First(&bundle).Error
		assert.NoError(t, err)
		assert.Equal(t, types.RollupFinalized, types.RollupStatus(bundle.RollupStatus))
		assert.NotNil(t, bundle.FinalizedAt)
	})
}

func TestPendingTransactionOrm(t *testing.T) {
	sqlDB, err := db.DB()
	assert.NoError(t, err)
@@ -95,6 +95,22 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
			return nil, fmt.Errorf("failed to estimate codecv2 chunk L1 commit batch size and blob size: %w", err)
		}
		return metrics, nil
	case encoding.CodecV3:
		start := time.Now()
		metrics.L1CommitGas = codecv3.EstimateChunkL1CommitGas(chunk)
		metrics.EstimateGasTime = time.Since(start)

		start = time.Now()
		metrics.L1CommitCalldataSize = codecv3.EstimateChunkL1CommitCalldataSize(chunk)
		metrics.EstimateCalldataSizeTime = time.Since(start)

		start = time.Now()
		metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv3.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
		metrics.EstimateBlobSizeTime = time.Since(start)
		if err != nil {
			return nil, fmt.Errorf("failed to estimate codecv3 chunk L1 commit batch size and blob size: %w", err)
		}
		return metrics, nil
	default:
		return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
	}
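The CodecV3 branch repeats the measure-then-record pattern of the earlier codec branches. One way to express that pattern generically (a sketch of the idea, not how the repository factors it):

// timeIt runs fn and records its wall-clock duration into *out.
func timeIt(out *time.Duration, fn func()) {
	start := time.Now()
	fn()
	*out = time.Since(start)
}

// usage, mirroring the codecv3 branch above:
timeIt(&metrics.EstimateGasTime, func() {
	metrics.L1CommitGas = codecv3.EstimateChunkL1CommitGas(chunk)
})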
@@ -203,6 +219,22 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
			return nil, fmt.Errorf("failed to estimate codecv2 batch L1 commit batch size and blob size: %w", err)
		}
		return metrics, nil
	case encoding.CodecV3:
		start := time.Now()
		metrics.L1CommitGas = codecv3.EstimateBatchL1CommitGas(batch)
		metrics.EstimateGasTime = time.Since(start)

		start = time.Now()
		metrics.L1CommitCalldataSize = codecv3.EstimateBatchL1CommitCalldataSize(batch)
		metrics.EstimateCalldataSizeTime = time.Since(start)

		start = time.Now()
		metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv3.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
		metrics.EstimateBlobSizeTime = time.Since(start)
		if err != nil {
			return nil, fmt.Errorf("failed to estimate codecv3 batch L1 commit batch size and blob size: %w", err)
		}
		return metrics, nil
	default:
		return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
	}
@@ -241,6 +273,16 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code
			return common.Hash{}, fmt.Errorf("failed to get codecv2 DA chunk hash: %w", err)
		}
		return chunkHash, nil
	case encoding.CodecV3:
		daChunk, err := codecv3.NewDAChunk(chunk, totalL1MessagePoppedBefore)
		if err != nil {
			return common.Hash{}, fmt.Errorf("failed to create codecv3 DA chunk: %w", err)
		}
		chunkHash, err := daChunk.Hash()
		if err != nil {
			return common.Hash{}, fmt.Errorf("failed to get codecv3 DA chunk hash: %w", err)
		}
		return chunkHash, nil
	default:
		return common.Hash{}, fmt.Errorf("unsupported codec version: %v", codecVersion)
	}
@@ -374,6 +416,44 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
			return nil, fmt.Errorf("failed to get codecv2 end DA chunk hash: %w", err)
		}
		return batchMeta, nil
	case encoding.CodecV3:
		daBatch, err := codecv3.NewDABatch(batch)
		if err != nil {
			return nil, fmt.Errorf("failed to create codecv3 DA batch: %w", err)
		}

		blobDataProof, err := daBatch.BlobDataProofForPointEvaluation()
		if err != nil {
			return nil, fmt.Errorf("failed to get codecv3 blob data proof for point evaluation: %w", err)
		}

		batchMeta := &BatchMetadata{
			BatchHash:          daBatch.Hash(),
			BatchDataHash:      daBatch.DataHash,
			BatchBlobDataProof: blobDataProof,
			BatchBytes:         daBatch.Encode(),
		}

		startDAChunk, err := codecv3.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
		if err != nil {
			return nil, fmt.Errorf("failed to create codecv3 start DA chunk: %w", err)
		}

		batchMeta.StartChunkHash, err = startDAChunk.Hash()
		if err != nil {
			return nil, fmt.Errorf("failed to get codecv3 start DA chunk hash: %w", err)
		}

		endDAChunk, err := codecv3.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
		if err != nil {
			return nil, fmt.Errorf("failed to create codecv3 end DA chunk: %w", err)
		}

		batchMeta.EndChunkHash, err = endDAChunk.Hash()
		if err != nil {
			return nil, fmt.Errorf("failed to get codecv3 end DA chunk hash: %w", err)
		}
		return batchMeta, nil
	default:
		return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
	}
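GetBatchMetadata is what gives the ORM its derived columns: the InsertBatch hunk earlier in this diff stores batchMeta.BatchBytes as BatchHeader alongside the new CodecVersion column. A small illustrative sketch (InsertBatch computes the metadata itself; calling GetBatchMetadata directly is shown only to surface the fields):

batchMeta, err := utils.GetBatchMetadata(batch, encoding.CodecV3)
if err != nil {
	return err
}
fmt.Println(batchMeta.BatchHash.Hex()) // becomes Batch.Hash
fmt.Println(len(batchMeta.BatchBytes)) // stored as Batch.BatchHeader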
@@ -1,12 +1,17 @@
// SPDX-License-Identifier: UNLICENSED
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.24;

import {BatchHeaderV0Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
import {BatchHeaderV1Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV1Codec.sol";
import {BatchHeaderV3Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV3Codec.sol";
import {ChunkCodecV0} from "../../../scroll-contracts/src/libraries/codec/ChunkCodecV0.sol";
import {ChunkCodecV1} from "../../../scroll-contracts/src/libraries/codec/ChunkCodecV1.sol";

contract MockBridge {
    /**********
     * Errors *
     **********/

    /// @dev Thrown when committing a committed batch.
    error ErrorBatchIsAlreadyCommitted();

@@ -20,7 +25,7 @@ contract MockBridge {
    error ErrorCallPointEvaluationPrecompileFailed();

    /// @dev Thrown when the transaction has multiple blobs.
    error ErrorFoundMultipleBlob();
    error ErrorFoundMultipleBlobs();

    /// @dev Thrown when some fields are not zero in genesis batch.
    error ErrorGenesisBatchHasNonZeroField();
@@ -43,11 +48,8 @@ contract MockBridge {
    /// @dev Thrown when the batch index is incorrect.
    error ErrorIncorrectBatchIndex();

    /// @dev Thrown when the previous state root doesn't match stored one.
    error ErrorIncorrectPreviousStateRoot();

    /// @dev Thrown when the batch header version is invalid.
    error ErrorInvalidBatchHeaderVersion();
    /// @dev Thrown when the batch version is incorrect.
    error ErrorIncorrectBatchVersion();

    /// @dev Thrown when no blob found in the transaction.
    error ErrorNoBlobFound();
@@ -55,9 +57,6 @@ contract MockBridge {
    /// @dev Thrown when the number of transactions is less than the number of L1 messages in one block.
    error ErrorNumTxsLessThanNumL1Msgs();

    /// @dev Thrown when the given previous state is zero.
    error ErrorPreviousStateRootIsZero();

    /// @dev Thrown when the given state root is zero.
    error ErrorStateRootIsZero();

@@ -70,24 +69,37 @@ contract MockBridge {
    event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
    event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);

    struct L2MessageProof {
        uint256 batchIndex;
        bytes merkleProof;
    }
    /*************
     * Constants *
     *************/

    /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
    address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
    address internal constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);

    /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
    /// point evaluation precompile
    uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
    uint256 internal constant BLS_MODULUS =
        52435875175126190479447740508185965837690552500527637822603658699938581184513;

    /// @notice The chain id of the corresponding layer 2 chain.
    uint64 public immutable layer2ChainId;

    /*************
     * Variables *
     *************/

    /// @notice The maximum number of transactions allowed in each chunk.
    uint256 public maxNumTxInChunk;

    uint256 public l1BaseFee;
    uint256 public l1BlobBaseFee;
    uint256 public l2BaseFee;
    uint256 public lastFinalizedBatchIndex;

    mapping(uint256 => bytes32) public committedBatches;

    mapping(uint256 => bytes32) public finalizedStateRoots;

    mapping(uint256 => bytes32) public withdrawRoots;

    function setL1BaseFee(uint256 _l1BaseFee) external {
@@ -108,6 +120,8 @@ contract MockBridge {
     *****************************/

    /// @notice Import layer 2 genesis block
    /// @param _batchHeader The header of the genesis batch.
    /// @param _stateRoot The state root of the genesis block.
    function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
        // check genesis batch header length
        if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
@@ -141,16 +155,10 @@
        bytes[] memory _chunks,
        bytes calldata
    ) external {
        // check whether the batch is empty
        if (_chunks.length == 0) revert ErrorBatchIsEmpty();

        (, bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _loadBatchHeader(
            _parentBatchHeader
        (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch(
            _parentBatchHeader,
            _chunks
        );
        unchecked {
            _batchIndex += 1;
        }
        if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted();

        bytes32 _batchHash;
        uint256 batchPtr;
@@ -166,7 +174,7 @@
                _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
            }
            // store entries, the order matters
            BatchHeaderV0Codec.storeVersion(batchPtr, _version);
            BatchHeaderV0Codec.storeVersion(batchPtr, 0);
            BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex);
            BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
            BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
@@ -177,9 +185,10 @@
                batchPtr,
                BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH
            );
        } else {
            bytes32 blobVersionedHash;
            (blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
        } else if (_version <= 2) {
            // versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec,
            // but they use different blob encoding and different verifiers.
            (_dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
                _totalL1MessagesPoppedOverall,
                _chunks
            );
@@ -187,56 +196,125 @@ contract MockBridge {
                batchPtr := mload(0x40)
                _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
            }

            // store entries, the order matters
            BatchHeaderV1Codec.storeVersion(batchPtr, _version);
            BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
            BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
            BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
            BatchHeaderV1Codec.storeDataHash(batchPtr, _dataHash);
            BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, blobVersionedHash);
            // Some are using `BatchHeaderV0Codec`, see comments of `BatchHeaderV1Codec`.
            BatchHeaderV0Codec.storeVersion(batchPtr, _version);
            BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex);
            BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
            BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
            BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash);
            BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _getBlobVersionedHash());
            BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
            // compute batch hash
            _batchHash = BatchHeaderV1Codec.computeBatchHash(
            // compute batch hash, V1 and V2 have the same code as V0
            _batchHash = BatchHeaderV0Codec.computeBatchHash(
                batchPtr,
                BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH
            );
        } else {
            revert ErrorIncorrectBatchVersion();
        }

        committedBatches[_batchIndex] = _batchHash;
        emit CommitBatch(_batchIndex, _batchHash);
        _afterCommitBatch(_batchIndex, _batchHash);
    }

    /// @dev This function will revert unless all V0/V1/V2 batches are finalized. This is because we start to
    /// pop L1 messages in `commitBatchWithBlobProof` but not in `commitBatch`. We also introduce `finalizedQueueIndex`
    /// in `L1MessageQueue`. If one of the V0/V1/V2 batches is not finalized, `L1MessageQueue.pendingQueueIndex` will not
    /// match `parentBatchHeader.totalL1MessagePopped` and thus revert.
    function commitBatchWithBlobProof(
        uint8 _version,
        bytes calldata _parentBatchHeader,
        bytes[] memory _chunks,
        bytes calldata,
        bytes calldata _blobDataProof
    ) external {
        if (_version <= 2) {
            revert ErrorIncorrectBatchVersion();
        }

        // allocate memory of batch header and store entries if necessary, the order matters
        // @note why store entries if necessary, to avoid stack overflow problem.
        // The codes for `version`, `batchIndex`, `l1MessagePopped`, `totalL1MessagePopped` and `dataHash`
        // are the same as `BatchHeaderV0Codec`.
        // The codes for `blobVersionedHash`, and `parentBatchHash` are the same as `BatchHeaderV1Codec`.
        uint256 batchPtr;
        assembly {
            batchPtr := mload(0x40)
            // This is `BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH`, use `193` here to reduce code
            // complexity. Be careful that the length may change in future versions.
            mstore(0x40, add(batchPtr, 193))
        }
        BatchHeaderV0Codec.storeVersion(batchPtr, _version);

        (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch(
            _parentBatchHeader,
            _chunks
        );
        BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex);

        // versions 2 and 3 both use ChunkCodecV1
        (bytes32 _dataHash, uint256 _totalL1MessagesPoppedInBatch) = _commitChunksV1(
            _totalL1MessagesPoppedOverall,
            _chunks
        );
        unchecked {
            _totalL1MessagesPoppedOverall += _totalL1MessagesPoppedInBatch;
        }

        BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
        BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
        BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash);

        // verify blob versioned hash
        bytes32 _blobVersionedHash = _getBlobVersionedHash();
        _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof);
        BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _blobVersionedHash);
        BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash);

        uint256 lastBlockTimestamp;
        {
            bytes memory lastChunk = _chunks[_chunks.length - 1];
            lastBlockTimestamp = ChunkCodecV1.getLastBlockTimestamp(lastChunk);
        }
        BatchHeaderV3Codec.storeLastBlockTimestamp(batchPtr, lastBlockTimestamp);
        BatchHeaderV3Codec.storeBlobDataProof(batchPtr, _blobDataProof);

        // compute batch hash, V3 has the same code as V0
        bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(
            batchPtr,
            BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH
        );

        _afterCommitBatch(_batchIndex, _batchHash);
    }

    /// @dev We keep this function to upgrade to 4844 more smoothly.
    function finalizeBatchWithProof(
        bytes calldata _batchHeader,
        bytes32 _prevStateRoot,
        bytes32, /*_prevStateRoot*/
        bytes32 _postStateRoot,
        bytes32 _withdrawRoot,
        bytes calldata
    ) external {
        if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
        if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
        (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch(
            _batchHeader,
            _postStateRoot
        );

        // compute batch hash and verify
        (, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);

        // verify previous state root.
        if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();

        // avoid duplicated verification
        if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();

        // check and update lastFinalizedBatchIndex
        unchecked {
            if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
            lastFinalizedBatchIndex = _batchIndex;
        // compute public input hash
        bytes32 _publicInputHash;
        {
            bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(batchPtr);
            bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1];
            _publicInputHash = keccak256(
                abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)
            );
        }

        // record state root and withdraw root
        finalizedStateRoots[_batchIndex] = _postStateRoot;
        withdrawRoots[_batchIndex] = _withdrawRoot;

        emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
        // Pop finalized and non-skipped message from L1MessageQueue.
        uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr);
        _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
    }

    /// @dev Memory layout of `_blobDataProof`:
@@ -247,37 +325,140 @@
    /// ```
    function finalizeBatchWithProof4844(
        bytes calldata _batchHeader,
        bytes32 _prevStateRoot,
        bytes32,
        bytes32 _postStateRoot,
        bytes32 _withdrawRoot,
        bytes calldata _blobDataProof,
        bytes calldata
    ) external {
        if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
        (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch(
            _batchHeader,
            _postStateRoot
        );

        // compute public input hash
        bytes32 _publicInputHash;
        {
            bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(batchPtr);
            bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(batchPtr);
            bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1];
            // verify blob versioned hash
            _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof);
            _publicInputHash = keccak256(
                abi.encodePacked(
                    layer2ChainId,
                    _prevStateRoot,
                    _postStateRoot,
                    _withdrawRoot,
                    _dataHash,
                    _blobDataProof[0:64],
                    _blobVersionedHash
                )
            );
        }

        // Pop finalized and non-skipped message from L1MessageQueue.
        uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr);
        _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
    }
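For cross-checking off-chain, the 4844 public input hash can be recomputed in Go as a byte-exact mirror of the abi.encodePacked call above: an 8-byte big-endian chain id, then the 32-byte roots and data hash, the first 64 bytes of the blob data proof, and the blob versioned hash. A self-contained sketch (the interpretation of the proof's first 64 bytes as challenge and evaluation follows the truncated layout comment above and is an assumption here):

package main

import (
	"encoding/binary"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// publicInputHash4844 mirrors keccak256(abi.encodePacked(...)) in
// finalizeBatchWithProof4844; a Solidity uint64 packs to 8 big-endian bytes.
func publicInputHash4844(chainID uint64, prevStateRoot, postStateRoot, withdrawRoot, dataHash common.Hash, blobDataProof []byte, blobVersionedHash common.Hash) common.Hash {
	buf := make([]byte, 0, 8+4*32+64+32)
	buf = binary.BigEndian.AppendUint64(buf, chainID)
	buf = append(buf, prevStateRoot.Bytes()...)
	buf = append(buf, postStateRoot.Bytes()...)
	buf = append(buf, withdrawRoot.Bytes()...)
	buf = append(buf, dataHash.Bytes()...)
	buf = append(buf, blobDataProof[:64]...) // assumed: challenge and evaluation, per the layout doc
	buf = append(buf, blobVersionedHash.Bytes()...)
	return crypto.Keccak256Hash(buf)
}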
    function finalizeBundleWithProof(
        bytes calldata _batchHeader,
        bytes32 _postStateRoot,
        bytes32 _withdrawRoot,
        bytes calldata
    ) external {
        if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();

        // retrieve finalized state root and batch hash from storage
        uint256 _finalizedBatchIndex = lastFinalizedBatchIndex;

        // compute pending batch hash and verify
        (, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
        if (_batchIndex <= _finalizedBatchIndex) revert ErrorBatchIsAlreadyVerified();

        // store in state
        // @note we do not store intermediate finalized roots
        lastFinalizedBatchIndex = _batchIndex;
        finalizedStateRoots[_batchIndex] = _postStateRoot;
        withdrawRoots[_batchIndex] = _withdrawRoot;

        emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
    }

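A minimal Go sketch of this bookkeeping (stand-in types, not the contract's API): finalizing a bundle advances the cursor past every batch the bundle covers, and only the bundle's last batch gets stored roots, matching the "@note we do not store intermediate finalized roots" comment above.

package main

import "fmt"

type bundleState struct {
	lastFinalizedBatchIndex uint64
	finalizedStateRoots     map[uint64][32]byte
	withdrawRoots           map[uint64][32]byte
}

// finalizeBundle mirrors finalizeBundleWithProof: one jump of the cursor,
// roots recorded only at the bundle's final batch index.
func (s *bundleState) finalizeBundle(batchIndex uint64, postStateRoot, withdrawRoot [32]byte) error {
	if batchIndex <= s.lastFinalizedBatchIndex {
		return fmt.Errorf("batch %d is already verified", batchIndex)
	}
	s.lastFinalizedBatchIndex = batchIndex // intermediate batches get no stored roots
	s.finalizedStateRoots[batchIndex] = postStateRoot
	s.withdrawRoots[batchIndex] = withdrawRoot
	return nil
}

func main() {
	s := &bundleState{finalizedStateRoots: map[uint64][32]byte{}, withdrawRoots: map[uint64][32]byte{}}
	_ = s.finalizeBundle(8, [32]byte{1}, [32]byte{2})                              // finalizes batches 1..8 in one step
	fmt.Println(s.lastFinalizedBatchIndex, s.finalizedStateRoots[5] == [32]byte{}) // 8 true
}
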
    /**********************
     * Internal Functions *
     **********************/

    /// @dev Internal function to do common checks before actual batch committing.
    /// @param _parentBatchHeader The parent batch header in calldata.
    /// @param _chunks The list of chunks in memory.
    /// @return _parentBatchHash The batch hash of parent batch header.
    /// @return _batchIndex The index of current batch.
    /// @return _totalL1MessagesPoppedOverall The total number of L1 messages popped before current batch.
    function _beforeCommitBatch(bytes calldata _parentBatchHeader, bytes[] memory _chunks)
        private
        view
        returns (
            bytes32 _parentBatchHash,
            uint256 _batchIndex,
            uint256 _totalL1MessagesPoppedOverall
        )
    {
        // check whether the batch is empty
        if (_chunks.length == 0) revert ErrorBatchIsEmpty();
        (, _parentBatchHash, _batchIndex, _totalL1MessagesPoppedOverall) = _loadBatchHeader(_parentBatchHeader);
        unchecked {
            _batchIndex += 1;
        }
        if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted();
    }

    /// @dev Internal function to do common checks after actual batch committing.
    /// @param _batchIndex The index of current batch.
    /// @param _batchHash The hash of current batch.
    function _afterCommitBatch(uint256 _batchIndex, bytes32 _batchHash) private {
        committedBatches[_batchIndex] = _batchHash;
        emit CommitBatch(_batchIndex, _batchHash);
    }

    /// @dev Internal function to do common checks before actual batch finalization.
    /// @param _batchHeader The current batch header in calldata.
    /// @param _postStateRoot The state root after current batch.
    /// @return batchPtr The start memory offset of current batch in memory.
    /// @return _batchHash The hash of current batch.
    /// @return _batchIndex The index of current batch.
    function _beforeFinalizeBatch(bytes calldata _batchHeader, bytes32 _postStateRoot)
        internal
        view
        returns (
            uint256 batchPtr,
            bytes32 _batchHash,
            uint256 _batchIndex
        )
    {
        if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();

        // compute batch hash and verify
        (uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
        bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);

        // Calls the point evaluation precompile and verifies the output
        {
            (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
                abi.encodePacked(_blobVersionedHash, _blobDataProof)
            );
            // We verify that the point evaluation precompile call was successful by testing that the latter 32 bytes
            // of the response equal BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
            if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
            (, uint256 result) = abi.decode(data, (uint256, uint256));
            if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
        }

        // verify previous state root.
        if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
        (batchPtr, _batchHash, _batchIndex, ) = _loadBatchHeader(_batchHeader);

        // avoid duplicated verification
        if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
    }

    /// @dev Internal function to do common checks after actual batch finalization.
    /// @param
    /// @param _batchIndex The index of current batch.
    /// @param _batchHash The hash of current batch.
    /// @param _postStateRoot The state root after current batch.
    /// @param _withdrawRoot The withdraw trie root after current batch.
    function _afterFinalizeBatch(
        uint256,
        uint256 _batchIndex,
        bytes32 _batchHash,
        bytes32 _postStateRoot,
        bytes32 _withdrawRoot
    ) internal {
        // check and update lastFinalizedBatchIndex
        unchecked {
            if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
@@ -291,19 +472,43 @@ contract MockBridge {
        emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
    }

    /**********************
     * Internal Functions *
     **********************/
    /// @dev Internal function to check blob versioned hash.
    /// @param _blobVersionedHash The blob versioned hash to check.
    /// @param _blobDataProof The blob data proof used to verify the blob versioned hash.
    function _checkBlobVersionedHash(bytes32 _blobVersionedHash, bytes calldata _blobDataProof) internal view {
        // Calls the point evaluation precompile and verifies the output
        (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
            abi.encodePacked(_blobVersionedHash, _blobDataProof)
        );
        // We verify that the point evaluation precompile call was successful by testing that the latter 32 bytes
        // of the response equal BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
        if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
        (, uint256 result) = abi.decode(data, (uint256, uint256));
        if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
    }

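To make the precompile call concrete: per EIP-4844 the input is 192 bytes, versioned_hash(32) || z(32) || y(32) || commitment(48) || proof(48), and a successful call returns FIELD_ELEMENTS_PER_BLOB(32) || BLS_MODULUS(32), which is why the code checks the latter 32 bytes against BLS_MODULUS. A Go sketch of assembling that input, assuming `_blobDataProof` carries z || y || commitment || proof (160 bytes) as the `/// @dev Memory layout` comment earlier suggests:

package main

import (
	"errors"
	"fmt"
)

// pointEvaluationInput builds the 192-byte precompile input:
// versionedHash(32) || z(32) || y(32) || commitment(48) || proof(48).
func pointEvaluationInput(versionedHash [32]byte, blobDataProof []byte) ([]byte, error) {
	if len(blobDataProof) != 160 { // z || y || commitment || proof
		return nil, errors.New("blobDataProof must be 160 bytes")
	}
	input := make([]byte, 0, 192)
	input = append(input, versionedHash[:]...)
	input = append(input, blobDataProof...)
	return input, nil
}

func main() {
	in, err := pointEvaluationInput([32]byte{0x01}, make([]byte, 160))
	fmt.Println(len(in), err) // 192 <nil>
}
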
    /// @dev Internal function to get the blob versioned hash.
    /// @return _blobVersionedHash The retrieved blob versioned hash.
    function _getBlobVersionedHash() internal virtual returns (bytes32 _blobVersionedHash) {
        bytes32 _secondBlob;
        // Get blob's versioned hash
        assembly {
            _blobVersionedHash := blobhash(0)
            _secondBlob := blobhash(1)
        }
        if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound();
        if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlobs();
    }

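`blobhash(0)` yields the versioned hash of the transaction's first blob, and the second read enforces that exactly one blob is attached. Under EIP-4844 that hash is derived from the KZG commitment as 0x01 || sha256(commitment)[1:]. A small Go sketch of the derivation:

package main

import (
	"crypto/sha256"
	"fmt"
)

// kzgToVersionedHash computes the EIP-4844 versioned hash: sha256 of the
// 48-byte commitment with the first byte replaced by the version tag 0x01.
func kzgToVersionedHash(commitment [48]byte) [32]byte {
	h := sha256.Sum256(commitment[:])
	h[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
	return h
}

func main() {
	fmt.Printf("%x\n", kzgToVersionedHash([48]byte{}))
}
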
    /// @dev Internal function to commit chunks with version 0
    /// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks.
    /// @param _chunks The list of chunks to commit.
    /// @return _batchDataHash The computed data hash for the list of chunks.
    /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages poped in this batch, including skipped one.
    /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped one.
    function _commitChunksV0(
        uint256 _totalL1MessagesPoppedOverall,
        bytes[] memory _chunks
    ) internal pure returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) {
    ) internal view returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) {
        uint256 _chunksLength = _chunks.length;

        // load `batchDataHashPtr` and reserve the memory region for chunk data hashes
@@ -341,32 +546,12 @@ contract MockBridge {
    /// @dev Internal function to commit chunks with version 1
    /// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks.
    /// @param _chunks The list of chunks to commit.
    /// @return _blobVersionedHash The blob versioned hash for the blob carried in this transaction.
    /// @return _batchDataHash The computed data hash for the list of chunks.
    /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages poped in this batch, including skipped one.
    /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped one.
    function _commitChunksV1(
        uint256 _totalL1MessagesPoppedOverall,
        bytes[] memory _chunks
    )
        internal
        view
        returns (
            bytes32 _blobVersionedHash,
            bytes32 _batchDataHash,
            uint256 _totalL1MessagesPoppedInBatch
        )
    {
        {
            bytes32 _secondBlob;
            // Get blob's versioned hash
            assembly {
                _blobVersionedHash := blobhash(0)
                _secondBlob := blobhash(1)
            }
            if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound();
            if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlob();
        }

    ) internal view returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) {
        uint256 _chunksLength = _chunks.length;

        // load `batchDataHashPtr` and reserve the memory region for chunk data hashes
@@ -424,22 +609,25 @@ contract MockBridge {
            version := shr(248, calldataload(_batchHeader.offset))
        }

        // version should be always 0 or 1 in current code
        uint256 _length;
        if (version == 0) {
            (batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
            _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
            _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
        } else {
        } else if (version <= 2) {
            (batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
            _batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
            _batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
        } else if (version >= 3) {
            (batchPtr, _length) = BatchHeaderV3Codec.loadAndValidate(_batchHeader);
        }

        // the code for computing the batch hash is the same for V0, V1, V2 and V3,
        // as are `_batchIndex` and `_totalL1MessagesPoppedOverall`.
        _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
        _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
        _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr);

        // only check when genesis is imported
        if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {
            revert ErrorIncorrectBatchHash();
        }
        _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr);
    }

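The dispatch keys off the header's first byte: `shr(248, calldataload(_batchHeader.offset))` loads the first 32-byte calldata word and keeps only its most significant byte. The same read in Go, as a sketch:

package main

import (
	"errors"
	"fmt"
)

// headerVersion mirrors shr(248, calldataload(offset)): the batch header's
// codec version is simply its first byte.
func headerVersion(batchHeader []byte) (byte, error) {
	if len(batchHeader) == 0 {
		return 0, errors.New("empty batch header")
	}
	return batchHeader[0], nil
}

func main() {
	v, _ := headerVersion([]byte{0x03, 0xaa, 0xbb})
	fmt.Println(v) // 3 -> handled by BatchHeaderV3Codec
}
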
    /// @dev Internal function to commit a chunk with version 0.
@@ -452,7 +640,7 @@ contract MockBridge {
        bytes memory _chunk,
        uint256 _totalL1MessagesPoppedInBatch,
        uint256 _totalL1MessagesPoppedOverall
    ) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
    ) internal view returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
        uint256 chunkPtr;
        uint256 startDataPtr;
        uint256 dataPtr;
@@ -481,6 +669,8 @@ contract MockBridge {
            }
        }

        // It is used to compute the actual number of transactions in chunk.
        uint256 txHashStartDataPtr = dataPtr;
        // concatenate tx hashes
        uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks);
        chunkPtr += 1;
@@ -510,6 +700,9 @@ contract MockBridge {
            }
        }

        // check the actual number of transactions in the chunk
        if ((dataPtr - txHashStartDataPtr) / 32 > maxNumTxInChunk) revert ErrorTooManyTxsInOneChunk();

        assembly {
            chunkPtr := add(_chunk, 0x20)
        }
@@ -532,7 +725,7 @@ contract MockBridge {
        bytes memory _chunk,
        uint256 _totalL1MessagesPoppedInBatch,
        uint256 _totalL1MessagesPoppedOverall
    ) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
    ) internal view returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
        uint256 chunkPtr;
        uint256 startDataPtr;
        uint256 dataPtr;
@@ -568,7 +761,7 @@ contract MockBridge {
            uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
            if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
            unchecked {
                _totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages
                _totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages
                _totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
                _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
                _totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
@@ -578,6 +771,11 @@ contract MockBridge {
            }
        }

        // check the actual number of transactions in the chunk
        if (_totalTransactionsInChunk > maxNumTxInChunk) {
            revert ErrorTooManyTxsInOneChunk();
        }

        // compute data hash and store to memory
        assembly {
            _dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))

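The one-line change inside the `unchecked` block above is a genuine counting fix: `dataPtr` advances 32 bytes for every L1 message hash copied, so the message count is the byte distance divided by 32; adding the raw byte distance (the old line) overstated the chunk's transaction count 32-fold and could trip the `maxNumTxInChunk` check spuriously. A tiny Go sketch of the corrected accounting:

package main

import "fmt"

// hashesWritten converts a byte span of concatenated 32-byte tx hashes
// into a count of hashes, as the corrected line does.
func hashesWritten(startPtr, dataPtr uint64) uint64 {
	return (dataPtr - startPtr) / 32
}

func main() {
	// three hashes copied: the data pointer advanced by 96 bytes
	fmt.Println(hashesWritten(0x100, 0x100+96)) // 3
}
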
@@ -204,10 +204,8 @@ func TestFunction(t *testing.T) {

	// l1 rollup and watch rollup events
	t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
	t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
	t.Run("TestCommitBatchAndFinalizeBatch4844", testCommitBatchAndFinalizeBatch4844)
	t.Run("TestCommitBatchAndFinalizeBatchBeforeAndAfter4844", testCommitBatchAndFinalizeBatchBeforeAndAfter4844)
	t.Run("TestCommitBatchAndFinalizeBatchBeforeAndAfterCompression", testCommitBatchAndFinalizeBatchBeforeAndAfterCompression)
	t.Run("TestCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions", testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions)
	t.Run("TestCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions", testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions)

	// l1/l2 gas oracle
	t.Run("TestImportL1GasPrice", testImportL1GasPrice)

@@ -52,271 +52,26 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
	assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus))
}

func testCommitBatchAndFinalizeBatch(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

	prepareContracts(t)

	// Create L2Relayer
	l2Cfg := rollupApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
	assert.NoError(t, err)
	defer l2Relayer.StopSenders()

	// add some blocks to db
	var blocks []*encoding.Block
	for i := int64(0); i < 10; i++ {
		header := gethTypes.Header{
			Number:     big.NewInt(i + 1),
			ParentHash: common.Hash{},
			Difficulty: big.NewInt(0),
			BaseFee:    big.NewInt(0),
			Root:       common.HexToHash("0x1"),
		}
		blocks = append(blocks, &encoding.Block{
			Header:         &header,
			Transactions:   nil,
			WithdrawRoot:   common.HexToHash("0x2"),
			RowConsumption: &gethTypes.RowConsumption{},
		})
	}

	l2BlockOrm := orm.NewL2Block(db)
	err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
	assert.NoError(t, err)

	cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:             100,
		MaxTxNumPerChunk:                10000,
		MaxL1CommitGasPerChunk:          50000000000,
		MaxL1CommitCalldataSizePerChunk: 1000000,
		MaxRowConsumptionPerChunk:       1048319,
		ChunkTimeoutSec:                 300,
	}, &params.ChainConfig{}, db, nil)

	bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		MaxL1CommitGasPerBatch:          50000000000,
		MaxL1CommitCalldataSizePerBatch: 1000000,
		BatchTimeoutSec:                 300,
	}, &params.ChainConfig{}, db, nil)

	cp.TryProposeChunk()

	batchOrm := orm.NewBatch(db)
	unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background())
	assert.NoError(t, err)

	chunkOrm := orm.NewChunk(db)
	chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0)
	assert.NoError(t, err)
	assert.Len(t, chunks, 1)

	bp.TryProposeBatch()

	l2Relayer.ProcessPendingBatches()
	batch, err := batchOrm.GetLatestBatch(context.Background())
	assert.NoError(t, err)
	assert.NotNil(t, batch)

	// fetch rollup events
	assert.Eventually(t, func() bool {
		var statuses []types.RollupStatus
		statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
		return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		batch, err = batchOrm.GetLatestBatch(context.Background())
		assert.NoError(t, err)
		assert.NotNil(t, batch)
		assert.NotEmpty(t, batch.CommitTxHash)
		var receipt *gethTypes.Receipt
		receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
		return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
	}, 30*time.Second, time.Second)

	// add dummy proof
	proof := &message.BatchProof{
		Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
	}
	err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
	assert.NoError(t, err)
	err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
	assert.NoError(t, err)

	// process committed batch and check status
	l2Relayer.ProcessCommittedBatches()

	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
	assert.NoError(t, err)
	assert.Equal(t, 1, len(statuses))
	assert.Equal(t, types.RollupFinalizing, statuses[0])

	// fetch rollup events
	assert.Eventually(t, func() bool {
		var statuses []types.RollupStatus
		statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
		return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		batch, err = batchOrm.GetLatestBatch(context.Background())
		assert.NoError(t, err)
		assert.NotNil(t, batch)
		assert.NotEmpty(t, batch.FinalizeTxHash)
		var receipt *gethTypes.Receipt
		receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
		return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
	}, 30*time.Second, time.Second)
}

func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
	compressionTests := []bool{false, true} // false for uncompressed, true for compressed
	for _, compressed := range compressionTests {
func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
	for _, codecVersion := range codecVersions {
		db := setupDB(t)

		prepareContracts(t)

		// Create L2Relayer
		l2Cfg := rollupApp.Config.L2Config
		var chainConfig *params.ChainConfig
		if compressed {
		if codecVersion == encoding.CodecV0 {
			chainConfig = &params.ChainConfig{}
		} else if codecVersion == encoding.CodecV1 {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
		} else if codecVersion == encoding.CodecV2 {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
		} else {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
		}
		l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
		assert.NoError(t, err)

		// add some blocks to db
		var blocks []*encoding.Block
		for i := int64(0); i < 10; i++ {
			header := gethTypes.Header{
				Number:     big.NewInt(i + 1),
				ParentHash: common.Hash{},
				Difficulty: big.NewInt(0),
				BaseFee:    big.NewInt(0),
				Root:       common.HexToHash("0x1"),
			}
			blocks = append(blocks, &encoding.Block{
				Header:         &header,
				Transactions:   nil,
				WithdrawRoot:   common.HexToHash("0x2"),
				RowConsumption: &gethTypes.RowConsumption{},
			})
		}

		l2BlockOrm := orm.NewL2Block(db)
		err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
		assert.NoError(t, err)

		cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
			MaxBlockNumPerChunk:             100,
			MaxTxNumPerChunk:                10000,
			MaxL1CommitGasPerChunk:          1,
			MaxL1CommitCalldataSizePerChunk: 100000,
			MaxRowConsumptionPerChunk:       1048319,
			ChunkTimeoutSec:                 300,
			MaxUncompressedBatchBytesSize:   math.MaxUint64,
		}, chainConfig, db, nil)

		bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
			MaxL1CommitGasPerBatch:          1,
			MaxL1CommitCalldataSizePerBatch: 100000,
			BatchTimeoutSec:                 300,
			MaxUncompressedBatchBytesSize:   math.MaxUint64,
		}, chainConfig, db, nil)

		cp.TryProposeChunk()

		batchOrm := orm.NewBatch(db)
		unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background())
		assert.NoError(t, err)

		chunkOrm := orm.NewChunk(db)
		chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0)
		assert.NoError(t, err)
		assert.Len(t, chunks, 1)

		bp.TryProposeBatch()

		l2Relayer.ProcessPendingBatches()
		batch, err := batchOrm.GetLatestBatch(context.Background())
		assert.NoError(t, err)
		assert.NotNil(t, batch)

		// fetch rollup events
		assert.Eventually(t, func() bool {
			var statuses []types.RollupStatus
			statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
			return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
		}, 30*time.Second, time.Second)

		assert.Eventually(t, func() bool {
			batch, err = batchOrm.GetLatestBatch(context.Background())
			assert.NoError(t, err)
			assert.NotNil(t, batch)
			assert.NotEmpty(t, batch.CommitTxHash)
			var receipt *gethTypes.Receipt
			receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
			return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
		}, 30*time.Second, time.Second)

		// add dummy proof
		proof := &message.BatchProof{
			Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		}
		err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
		assert.NoError(t, err)
		err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
		assert.NoError(t, err)

		// process committed batch and check status
		l2Relayer.ProcessCommittedBatches()

		statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
		assert.NoError(t, err)
		assert.Equal(t, 1, len(statuses))
		assert.Equal(t, types.RollupFinalizing, statuses[0])

		// fetch rollup events
		assert.Eventually(t, func() bool {
			var statuses []types.RollupStatus
			statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
			return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
		}, 30*time.Second, time.Second)

		assert.Eventually(t, func() bool {
			batch, err = batchOrm.GetLatestBatch(context.Background())
			assert.NoError(t, err)
			assert.NotNil(t, batch)
			assert.NotEmpty(t, batch.FinalizeTxHash)
			var receipt *gethTypes.Receipt
			receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
			return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
		}, 30*time.Second, time.Second)

		l2Relayer.StopSenders()
		database.CloseDB(db)
	}
}

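The if/else ladder above pins down which hardforks each codec version presumes. The same mapping distilled into a self-contained Go sketch (local stand-in types rather than the repo's `encoding`/`params` packages; zero-valued fields mean "active from genesis"):

package main

import (
	"fmt"
	"math/big"
)

type codecVersion int

const (
	codecV0 codecVersion = iota // pre-Bernoulli, calldata batches
	codecV1                     // Bernoulli: EIP-4844 blob batches
	codecV2                     // Curie: compressed blobs
	codecV3                     // Darwin: bundles of batches
)

type chainConfig struct {
	BernoulliBlock *big.Int
	CurieBlock     *big.Int
	DarwinTime     *uint64
}

// forkConfigFor mirrors the test's mapping from codec version to fork activation.
func forkConfigFor(v codecVersion) chainConfig {
	switch v {
	case codecV0:
		return chainConfig{}
	case codecV1:
		return chainConfig{BernoulliBlock: big.NewInt(0)}
	case codecV2:
		return chainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
	default: // codecV3
		return chainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
	}
}

func main() {
	cfg := forkConfigFor(codecV3)
	fmt.Println("darwin active:", cfg.DarwinTime != nil) // darwin active: true
}
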
func testCommitBatchAndFinalizeBatchBeforeAndAfter4844(t *testing.T) {
	compressionTests := []bool{false, true} // false for uncompressed, true for compressed
	for _, compressed := range compressionTests {
		db := setupDB(t)

		prepareContracts(t)

		// Create L2Relayer
		l2Cfg := rollupApp.Config.L2Config
		var chainConfig *params.ChainConfig
		if compressed {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(5), CurieBlock: big.NewInt(5)}
		} else {
			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(5)}
		}
		l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
		assert.NoError(t, err)
		defer l2Relayer.StopSenders()
@@ -338,10 +93,6 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfter4844(t *testing.T) {
			})
		}

		l2BlockOrm := orm.NewL2Block(db)
		err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
		assert.NoError(t, err)

		cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
			MaxBlockNumPerChunk: 100,
			MaxTxNumPerChunk:    10000,
@@ -352,83 +103,128 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfter4844(t *testing.T) {
			MaxUncompressedBatchBytesSize: math.MaxUint64,
		}, chainConfig, db, nil)

		bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
			MaxL1CommitGasPerBatch:          50000000000,
			MaxL1CommitCalldataSizePerBatch: 1000000,
			BatchTimeoutSec:                 300,
			MaxUncompressedBatchBytesSize:   math.MaxUint64,
		}, chainConfig, db, nil)

		bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
			MaxBatchNumPerBundle: 1000000,
			BundleTimeoutSec:     300,
		}, chainConfig, db, nil)

		l2BlockOrm := orm.NewL2Block(db)
		err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[:5])
		assert.NoError(t, err)

		cp.TryProposeChunk()
		bap.TryProposeBatch()

		err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[5:])
		assert.NoError(t, err)

		cp.TryProposeChunk()
		bp.TryProposeBatch()
		bp.TryProposeBatch()
		bap.TryProposeBatch()

		for i := uint64(0); i < 2; i++ {
			l2Relayer.ProcessPendingBatches()
			batchOrm := orm.NewBatch(db)
			batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1)
			assert.NoError(t, err)
			assert.NotNil(t, batch)
		bup.TryProposeBundle() // The proposed bundle contains two batches when codec version is codecv3.

			// fetch rollup events
			assert.Eventually(t, func() bool {
				var statuses []types.RollupStatus
				statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
				return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
			}, 30*time.Second, time.Second)
		l2Relayer.ProcessPendingBatches()

			assert.Eventually(t, func() bool {
				batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
				assert.NoError(t, err)
				assert.NotNil(t, batch)
				assert.NotEmpty(t, batch.CommitTxHash)
				var receipt *gethTypes.Receipt
				receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
				return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
			}, 30*time.Second, time.Second)
		batchOrm := orm.NewBatch(db)
		bundleOrm := orm.NewBundle(db)

			// add dummy proof
			proof := &message.BatchProof{
				Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		assert.Eventually(t, func() bool {
			batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
			assert.NoError(t, getErr)
			assert.Len(t, batches, 3)
			batches = batches[1:]
			for _, batch := range batches {
				if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) {
					return false
				}
			}
			err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
			return true
		}, 30*time.Second, time.Second)

		batchProof := &message.BatchProof{
			Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
			Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
			Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		}
		batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, err)
		batches = batches[1:]
		for _, batch := range batches {
			err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 100)
			assert.NoError(t, err)
			err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
			assert.NoError(t, err)

			// process committed batch and check status
			l2Relayer.ProcessCommittedBatches()

			statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
			assert.NoError(t, err)
			assert.Equal(t, 1, len(statuses))
			assert.Equal(t, types.RollupFinalizing, statuses[0])

			// fetch rollup events
			assert.Eventually(t, func() bool {
				var statuses []types.RollupStatus
				statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
				return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
			}, 30*time.Second, time.Second)

			assert.Eventually(t, func() bool {
				batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
				assert.NoError(t, err)
				assert.NotNil(t, batch)
				assert.NotEmpty(t, batch.FinalizeTxHash)
				var receipt *gethTypes.Receipt
				receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
				return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
			}, 30*time.Second, time.Second)
		}

		bundleProof := &message.BundleProof{
			Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
			Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
			Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		}
		bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, err)
		for _, bundle := range bundles {
			err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100)
			assert.NoError(t, err)
		}

		assert.Eventually(t, func() bool {
			l2Relayer.ProcessCommittedBatches()
			l2Relayer.ProcessPendingBundles()

			batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
			assert.NoError(t, err)
			assert.Len(t, batches, 3)
			batches = batches[1:]
			for _, batch := range batches {
				if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
					return false
				}

				assert.NotEmpty(t, batch.FinalizeTxHash)
				receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
				assert.NoError(t, getErr)
				assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
			}

			bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
			assert.NoError(t, err)
			if codecVersion == encoding.CodecV0 || codecVersion == encoding.CodecV1 || codecVersion == encoding.CodecV2 {
				assert.Len(t, bundles, 0)
			} else {
				assert.Len(t, bundles, 1)
				bundle := bundles[0]
				if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
					return false
				}
				assert.NotEmpty(t, bundle.FinalizeTxHash)
				receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
				assert.NoError(t, err)
				assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
				batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
				assert.NoError(t, err)
				assert.Len(t, batches, 2)
				for _, batch := range batches {
					assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
					assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
				}
			}
			return true
		}, 30*time.Second, time.Second)

		l2Relayer.StopSenders()
		database.CloseDB(db)
	}
}

func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) {
func testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

@@ -436,7 +232,7 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) {

	// Create L2Relayer
	l2Cfg := rollupApp.Config.L2Config
	chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(5)}
	chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2), DarwinTime: func() *uint64 { t := uint64(4); return &t }()}
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
	assert.NoError(t, err)
	defer l2Relayer.StopSenders()
@@ -450,6 +246,7 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) {
			Difficulty: big.NewInt(0),
			BaseFee:    big.NewInt(0),
			Root:       common.HexToHash("0x1"),
			Time:       uint64(i + 1),
		}
		blocks = append(blocks, &encoding.Block{
			Header: &header,
@@ -473,74 +270,130 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) {
		MaxUncompressedBatchBytesSize: math.MaxUint64,
	}, chainConfig, db, nil)

	bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
	bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		MaxL1CommitGasPerBatch:          50000000000,
		MaxL1CommitCalldataSizePerBatch: 1000000,
		BatchTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, chainConfig, db, nil)

	bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
		MaxBatchNumPerBundle: 1000000,
		BundleTimeoutSec:     300,
	}, chainConfig, db, nil)

	cp.TryProposeChunk()
	cp.TryProposeChunk()
	cp.TryProposeChunk()
	cp.TryProposeChunk()
	cp.TryProposeChunk()
	bp.TryProposeBatch()
	bp.TryProposeBatch()

	for i := uint64(0); i < 2; i++ {
		l2Relayer.ProcessPendingBatches()
		batchOrm := orm.NewBatch(db)
		batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1)
		assert.NoError(t, err)
		assert.NotNil(t, batch)
	bap.TryProposeBatch()
	bap.TryProposeBatch()
	bap.TryProposeBatch()
	bap.TryProposeBatch()

		// fetch rollup events
		assert.Eventually(t, func() bool {
			var statuses []types.RollupStatus
			statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
			return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
		}, 30*time.Second, time.Second)
	bup.TryProposeBundle()

		assert.Eventually(t, func() bool {
			batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
			assert.NoError(t, err)
			assert.NotNil(t, batch)
			assert.NotEmpty(t, batch.CommitTxHash)
			var receipt *gethTypes.Receipt
			receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
			return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
		}, 30*time.Second, time.Second)
	l2Relayer.ProcessPendingBatches()

		// add dummy proof
		proof := &message.BatchProof{
			Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
	batchOrm := orm.NewBatch(db)
	bundleOrm := orm.NewBundle(db)

	assert.Eventually(t, func() bool {
		batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, getErr)
		assert.Len(t, batches, 4)
		batches = batches[1:]
		for _, batch := range batches {
			if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) {
				return false
			}
		}
		err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
		return true
	}, 30*time.Second, time.Second)

	batchProof := &message.BatchProof{
		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
	}
	batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
	assert.NoError(t, err)
	batches = batches[1:]
	for _, batch := range batches {
		err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 600)
		assert.NoError(t, err)
		err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
		assert.NoError(t, err)
	}

	// process committed batch and check status
	bundleProof := &message.BundleProof{
		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
	}
	bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
	assert.NoError(t, err)
	for _, bundle := range bundles {
		err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100)
		assert.NoError(t, err)
	}

	assert.Eventually(t, func() bool {
		l2Relayer.ProcessCommittedBatches()

		statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
		batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(statuses))
		assert.Equal(t, types.RollupFinalizing, statuses[0])

		// fetch rollup events
		assert.Eventually(t, func() bool {
			var statuses []types.RollupStatus
			statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
			return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
		}, 30*time.Second, time.Second)

		assert.Eventually(t, func() bool {
			batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
			assert.NoError(t, err)
			assert.NotNil(t, batch)
		assert.Len(t, batches, 4)
		batches = batches[1:2]
		for _, batch := range batches {
			if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
				return false
			}
			assert.NotEmpty(t, batch.FinalizeTxHash)
			var receipt *gethTypes.Receipt
			receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
			return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
		}, 30*time.Second, time.Second)
	}
			receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
			assert.NoError(t, getErr)
			assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
		}
		return true
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		l2Relayer.ProcessPendingBundles()

		batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, err)
		assert.Len(t, batches, 4)
		batches = batches[3:]
		for _, batch := range batches {
			if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
				return false
			}
			assert.NotEmpty(t, batch.FinalizeTxHash)
			receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
			assert.NoError(t, getErr)
			assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
		}

		bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, err)
		assert.Len(t, bundles, 1)
		bundle := bundles[0]
		if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
			return false
		}
		assert.NotEmpty(t, bundle.FinalizeTxHash)
		receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
		assert.NoError(t, err)
		assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
		batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
		assert.NoError(t, err)
		assert.Len(t, batches, 1)
		for _, batch := range batches {
			assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
			assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
		}
		return true
	}, 30*time.Second, time.Second)
}

Submodule scroll-contracts updated: ca7f0768b6...2ac4f3f7e0
Some files were not shown because too many files have changed in this diff.